repo_name | path | copies | size | content | license
---|---|---|---|---|---
probml/pyprobml | scripts/Error_correcting_code_demo.py | 1 | 1968 | # Illustrate parity check code using a directed graphical model
# Authors: murphyk@, Drishtii@
# Based on https://github.com/probml/pmtk3/blob/master/demos/errorCorrectingCodeDemo.m
#!pip install pgmpy
import pyprobml_utils as pml
import pgmpy_utils as pgm
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD
import numpy as np
import matplotlib.pyplot as plt
# DAG structure
model = BayesianModel([('X2', 'X3'), ('X1', 'X3'), ('X1', 'Y1'), ('X2', 'Y2'), ('X3', 'Y3')])
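# X1 and X2 are the message bits, X3 is the parity bit, and Y1-Y3 are noisy observations of X1-X3.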
# Defining individual CPDs.
CPDs = {}
CPDs['X1'] = TabularCPD(variable='X1', variable_card=2, values=[[0.5], [0.5]])
CPDs['X2'] = TabularCPD(variable='X2', variable_card=2, values=[[0.5], [0.5]])
CPDs['X3'] = TabularCPD(variable='X3', variable_card=2,
values=[[1, 0, 0, 1], [0, 1, 1, 0]],
evidence=['X1', 'X2'],
evidence_card=[2, 2])
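# This CPD is deterministic: X3 = XOR(X1, X2), i.e. X3 is the parity bit of the two message bits.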
noise = 0.2
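# Each observed bit Y_i is its parent X_i passed through a binary symmetric channel that flips the bit with probability noise.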
for i in range(3):
parent = 'X{}'.format(i + 1)
child = 'Y{}'.format(i + 1)
CPDs[child] = TabularCPD(variable=child, variable_card=2,
values=[[1-noise, noise], [noise, 1-noise]],
evidence=[parent],
evidence_card=[2])
# Make model
for cpd in CPDs.values():
model.add_cpds(cpd)
model.check_model()
from pgmpy.inference import VariableElimination
infer = VariableElimination(model)
# Inference
evidence = {'Y1': 1, 'Y2': 0, 'Y3': 0}
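# Condition on the noisy received codeword y = (1, 0, 0) and compute the posterior over each transmitted bit.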
marginals = {}
for i in range(3):
name = 'X{}'.format(i+1)
post = infer.query([name], evidence=evidence).values
marginals[name] = post
print(marginals)
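# Full joint posterior over (X1, X2, X3); flatten the 2x2x2 table into 8 codeword states for plotting.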
joint = infer.query(['X1','X2','X3'], evidence=evidence).values
J = joint.reshape(8)
fig, ax = plt.subplots()
plt.title('p(x|y=1,0,0)')
labels = ['000', '001', '010', '011', '100', '101', '110', '111']  # bit strings for (X1, X2, X3)
ax.bar(x=np.arange(8), height=J)
ax.set_xticks(np.arange(8))
ax.set_xticklabels(labels, rotation=90)
pml.savefig('error_correcting.pdf')
plt.savefig('error_correcting.pdf')
plt.show()
pgm.visualize_model(model)
| mit |
mkukielka/oddt | oddt/scoring/functions/PLECscore.py | 1 | 14458 | from __future__ import print_function
import sys
from os.path import dirname, isfile, join as path_join
from functools import partial
import json
import warnings
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from sklearn.metrics import r2_score
from sklearn import __version__ as sklearn_version
from sklearn.linear_model import SGDRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from oddt.metrics import rmse, standard_deviation_error
from oddt.scoring import scorer
from oddt.fingerprints import PLEC, MAX_HASH_VALUE
from oddt.scoring.descriptors import universal_descriptor
class PLECscore(scorer):
def __init__(self, protein=None, n_jobs=-1, version='linear',
depth_protein=5, depth_ligand=1, size=65536):
"""PLECscore - a novel scoring function based on PLEC fingerprints. The
underlying model can be one of:
* linear regression
* neural network (dense, 200x200x200)
* random forest (100 trees)
The scoring function is trained on the PDBbind v2016 database; even the
linear model outperforms other machine-learning scoring functions in
terms of the Pearson correlation coefficient on the "core set". For
details see the PLEC publication.
PLECscore predicts binding affinity (pKi/d).
.. versionadded:: 0.6
Parameters
----------
protein : oddt.toolkit.Molecule object
Receptor for the scored ligands
n_jobs: int (default=-1)
Number of cores to use for scoring and training. By default (-1)
all cores are allocated.
version: str (default='linear')
A version of scoring function ('linear', 'nn' or 'rf') - which
model should be used for the scoring function.
depth_protein: int (default=5)
The depth of ECFP environments generated on the protein side of
interaction. By default 6 (0 to 5) environments are generated.
depth_ligand: int (default=1)
The depth of ECFP environments generated on the ligand side of
interaction. By default 2 (0 to 1) environments are generated.
size: int (default=65536)
The final size of a folded PLEC fingerprint. This setting does not
limit the data encoded in the PLEC fingerprint (to control that, tune
the depths); it only sets the final length. Setting it too low will
lead to many collisions.
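Examples
--------
A minimal, hypothetical usage sketch (assumes `rec` and `ligands` are
already-loaded oddt.toolkit.Molecule objects; the variable names are
illustrative only):

>>> sf = PLECscore(protein=rec, version='linear')  # doctest: +SKIP
>>> sf.train(pdbbind_version=2016)  # doctest: +SKIP
>>> predicted_affinity = sf.predict(ligands)  # doctest: +SKIP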
"""
self.protein = protein
self.n_jobs = n_jobs
self.version = version
self.depth_protein = depth_protein
self.depth_ligand = depth_ligand
self.size = size
plec_func = partial(PLEC,
depth_ligand=depth_ligand,
depth_protein=depth_protein,
size=size,
count_bits=True,
sparse=True,
ignore_hoh=True)
descriptors = universal_descriptor(plec_func, protein=protein,
shape=size, sparse=True)
if version == 'linear':
# avoid deprecation warnings
kwargs = {'fit_intercept': False,
'loss': 'huber',
'penalty': 'elasticnet',
'random_state': 0,
'verbose': 0,
'alpha': 1e-4,
'epsilon': 1e-1,
}
if sklearn_version >= '0.19':
kwargs['max_iter'] = 100
else:
kwargs['n_iter'] = 100
model = SGDRegressor(**kwargs)
elif version == 'nn':
model = MLPRegressor((200, 200, 200),
batch_size=10,
random_state=0,
verbose=0,
solver='lbfgs')
elif version == 'rf':
model = RandomForestRegressor(n_estimators=100,
n_jobs=n_jobs,
verbose=0,
oob_score=True,
random_state=0)
else:
raise ValueError('The version "%s" is not supported by PLECscore'
% version)
super(PLECscore, self).__init__(model, descriptors,
score_title='PLEC%s_p%i_l%i_s%i' %
(version, depth_protein, depth_ligand,
size))
def gen_training_data(self,
pdbbind_dir,
pdbbind_versions=(2016,),
home_dir=None,
use_proteins=True):
if home_dir is None:
home_dir = path_join(dirname(__file__), 'PLECscore')
filename = path_join(home_dir, 'plecscore_descs_p%i_l%i.csv.gz' %
(self.depth_protein, self.depth_ligand))
# The CSV will contain unfolded FP
self.descriptor_generator.func.keywords['size'] = MAX_HASH_VALUE
self.descriptor_generator.shape = MAX_HASH_VALUE
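# Descriptors are generated unfolded (size=MAX_HASH_VALUE) so the cached CSV can later be folded down to any requested fingerprint size at training time.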
super(PLECscore, self)._gen_pdbbind_desc(
pdbbind_dir=pdbbind_dir,
pdbbind_versions=pdbbind_versions,
desc_path=filename,
include_general_set=True,
use_proteins=use_proteins,
)
# reset to the original size
self.descriptor_generator.func.keywords['size'] = self.size
self.descriptor_generator.shape = self.size
def gen_json(self, home_dir=None, pdbbind_version=2016):
if not home_dir:
home_dir = path_join(dirname(__file__), 'PLECscore')
if isinstance(self.model, SGDRegressor):
attributes = ['coef_', 'intercept_', 't_']
elif isinstance(self.model, MLPRegressor):
attributes = ['loss_', 'coefs_', 'intercepts_', 'n_iter_',
'n_layers_', 'n_outputs_', 'out_activation_']
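# Note: only the linear (SGDRegressor) and neural-network (MLPRegressor) models are serialised; there is no branch for the random-forest version.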
out = {}
for attr_name in attributes:
attr = getattr(self.model, attr_name)
# convert numpy arrays to list for json
if isinstance(attr, np.ndarray):
attr = attr.tolist()
elif (isinstance(attr, (list, tuple)) and
isinstance(attr[0], np.ndarray)):
attr = [x.tolist() for x in attr]
out[attr_name] = attr
json_path = path_join(home_dir, 'plecscore_%s_p%i_l%i_s%i_pdbbind%i.json' %
(self.version, self.depth_protein,
self.depth_ligand, self.size, pdbbind_version))
with open(json_path, 'w') as json_f:
json.dump(out, json_f, indent=2)
return json_path
def train(self, home_dir=None, sf_pickle=None, pdbbind_version=2016,
ignore_json=False):
if not home_dir:
home_dir = path_join(dirname(__file__), 'PLECscore')
desc_path = path_join(home_dir, 'plecscore_descs_p%i_l%i.csv.gz' %
(self.depth_protein, self.depth_ligand))
json_path = path_join(
home_dir, 'plecscore_%s_p%i_l%i_s%i_pdbbind%i.json' %
(self.version, self.depth_protein,
self.depth_ligand, self.size, pdbbind_version))
if (self.version in ['linear'] and # TODO: support other models
isfile(json_path) and
not ignore_json):
print('Loading pretrained PLECscore %s with depths P%i L%i on '
'PDBBind v%i'
% (self.version, self.depth_protein, self.depth_ligand,
pdbbind_version), file=sys.stderr)
with open(json_path) as json_f:
json_data = json.load(json_f)
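# Restore the serialised model attributes, converting JSON lists back into numpy arrays.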
for k, v in json_data.items():
if isinstance(v, list):
if isinstance(v[0], list):
v = [np.array(x) for x in v]
else:
v = np.array(v)
setattr(self.model, k, v)
else:
# blacklist core set 2013 and astex
pdbids_blacklist = [
'3ao4', '3i3b', '1uto', '1ps3', '1qi0', '3g2z', '3dxg', '3l7b',
'3mfv', '3b3s', '3kgp', '3fk1', '3fcq', '3lka', '3udh', '4gqq',
'3imc', '2xdl', '2ymd', '1lbk', '1bcu', '3zsx', '1f8d', '3muz',
'2v00', '1loq', '3n7a', '2r23', '3nq3', '2hb1', '2w66', '1n2v',
'3kwa', '3g2n', '4de2', '3ozt', '3b3w', '3cft', '3f3a', '2qmj',
'3f80', '1a30', '1w3k', '3ivg', '2jdy', '3u9q', '3pxf', '2wbg',
'1u33', '2x0y', '3mss', '1vso', '1q8t', '3acw', '3bpc', '3vd4',
'3cj2', '2brb', '1p1q', '2vo5', '3d4z', '2gss', '2yge', '3gy4',
'3zso', '3ov1', '1w4o', '1zea', '2zxd', '3ueu', '2qft', '1gpk',
'1f8b', '2jdm', '3su5', '2wca', '3n86', '2x97', '1n1m', '1o5b',
'2y5h', '3ehy', '4des', '3ebp', '1q8u', '4de1', '3huc', '3l4w',
'2vl4', '3coy', '3f3c', '1os0', '3owj', '3bkk', '1yc1', '1hnn',
'3vh9', '3bfu', '1w3l', '3k5v', '2qbr', '1lol', '10gs', '2j78',
'1r5y', '2weg', '3uo4', '3jvs', '2yfe', '1sln', '2iwx', '2jdu',
'4djv', '2xhm', '2xnb', '3s8o', '2zcr', '3oe5', '3gbb', '2d3u',
'3uex', '4dew', '1xd0', '1z95', '2vot', '1oyt', '2ole', '3gcs',
'1kel', '2vvn', '3kv2', '3pww', '3su2', '1f8c', '2xys', '3l4u',
'2xb8', '2d1o', '2zjw', '3f3e', '2g70', '2zwz', '1u1b', '4g8m',
'1o3f', '2x8z', '3cyx', '2cet', '3ag9', '2pq9', '3l3n', '1nvq',
'2cbj', '2v7a', '1h23', '2qbp', '3b68', '2xbv', '2fvd', '2vw5',
'3ejr', '3f17', '3nox', '1hfs', '1jyq', '2pcp', '3ge7', '2wtv',
'2zcq', '2obf', '3e93', '2p4y', '3dd0', '3nw9', '3uri', '3gnw',
'3su3', '2xy9', '1sqa', '3fv1', '2yki', '3g0w', '3pe2', '1e66',
'1igj', '4tmn', '2zx6', '3myg', '4gid', '3utu', '1lor', '1mq6',
'2x00', '2j62', '4djr', '1gm8', '1gpk', '1hnn', '1hp0', '1hq2',
'1hvy', '1hwi', '1hww', '1ia1', '1j3j', '1jd0', '1jje', '1ke5',
'1kzk', '1l2s', '1l7f', '1lpz', '1m2z', '1mmv', '1mzc', '1n1m',
'1n2v', '1n46', '1nav', '1of1', '1of6', '1opk', '1oq5', '1owe',
'1oyt', '1p2y', '1p62', '1pmn', '1q1g', '1q41', '1q4g', '1r1h',
'1r55', '1r58', '1r9o', '1s19', '1s3v', '1sg0', '1sj0', '1sq5',
'1sqn', '1t40', '1t46', '1t9b', '1tow', '1tt1', '1u1c', '1uml',
'1unl', '1uou', '1v0p', '1v48', '1v4s', '1vcj', '1w1p', '1w2g',
'1xm6', '1xoq', '1xoz', '1y6b', '1ygc', '1yqy', '1yv3', '1yvf',
'1ywr', '1z95', '2bm2', '2br1', '2bsm']
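# These PDB IDs (core set 2013 and the Astex diverse set) are excluded from training to avoid overlap with common benchmark test sets.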
# use remote csv if it's not present
if not isfile(desc_path):
branch = 'master' # define branch/commit
desc_url = ('https://raw.githubusercontent.com/oddt/oddt/%s'
'/oddt/scoring/functions/PLECscore/'
'plecscore_descs_p%i_l%i.csv.gz' %
(branch, self.depth_protein, self.depth_ligand))
warnings.warn('The CSV for PLEC P%i L%i is missing. Trying to '
'get it from ODDT GitHub.' % (self.depth_protein,
self.depth_ligand))
# download and save CSV
pd.read_csv(desc_url, index_col='pdbid').to_csv(
desc_path, compression='gzip')
# load the unfolded descriptors and fold them down to the configured PLEC size
super(PLECscore, self)._load_pdbbind_desc(
desc_path,
train_set=('general', 'refined'),
pdbbind_version=pdbbind_version,
train_blacklist=pdbids_blacklist,
fold_size=self.size,
)
print('Training PLECscore %s with depths P%i L%i on PDBBind v%i'
% (self.version, self.depth_protein, self.depth_ligand,
pdbbind_version), file=sys.stderr)
self.model.fit(self.train_descs, self.train_target)
sets = [
('Test', self.model.predict(self.test_descs), self.test_target),
('Train', self.model.predict(self.train_descs), self.train_target)]
if self.version == 'rf':
sets.append(('OOB', self.model.oob_prediction_, self.train_target))
for name, pred, target in sets:
print('%s set:' % name,
'R2_score: %.4f' % r2_score(target, pred),
'Rp: %.4f' % pearsonr(target, pred)[0],
'RMSE: %.4f' % rmse(target, pred),
'SD: %.4f' % standard_deviation_error(target, pred),
sep='\t', file=sys.stderr)
if sf_pickle is None:
return self.save('PLEC%s_p%i_l%i_pdbbind%i_s%i.pickle'
% (self.version, self.depth_protein,
self.depth_ligand, pdbbind_version, self.size))
else:
return self.save(sf_pickle)
@classmethod
def load(cls, filename=None, version='linear', pdbbind_version=2016,
depth_protein=5, depth_ligand=1, size=65536):
if filename is None:
# FIXME: it would be cool to have templates of names for a class
fname = ('PLEC%s_p%i_l%i_pdbbind%i_s%i.pickle' %
(version, depth_protein, depth_ligand,
pdbbind_version, size))
for f in [fname, path_join(dirname(__file__), fname)]:
if isfile(f):
filename = f
break
else:
print('No pickle, training new scoring function.',
file=sys.stderr)
sf = PLECscore(version=version)
filename = sf.train(sf_pickle=filename,
pdbbind_version=pdbbind_version)
return scorer.load(filename)
| bsd-3-clause |
dsm054/pandas | pandas/tests/indexes/test_base.py | 1 | 101055 | # -*- coding: utf-8 -*-
import math
import operator
from collections import defaultdict
from datetime import datetime, timedelta
from decimal import Decimal
import numpy as np
import pytest
import pandas as pd
import pandas.core.config as cf
import pandas.util.testing as tm
from pandas import (
CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, Int64Index,
PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, date_range,
isna, period_range
)
from pandas._libs.tslib import Timestamp
from pandas.compat import (
PY3, PY35, PY36, StringIO, lrange, lzip, range, text_type, u, zip
)
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.dtypes.generic import ABCIndex
from pandas.core.index import _get_combined_index, ensure_index_from_sequences
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.indexes.datetimes import _to_m8
from pandas.tests.indexes.common import Base
from pandas.util.testing import assert_almost_equal
class TestIndex(Base):
_holder = Index
def setup_method(self, method):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
uintIndex=tm.makeUIntIndex(100),
rangeIndex=tm.makeRangeIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])),
repeats=Index([0, 0, 1, 1, 2, 2]))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def generate_index_types(self, skip_index_keys=[]):
"""
Return a generator of the various index types, leaving
out the ones with a key in skip_index_keys
"""
for key, index in self.indices.items():
if key not in skip_index_keys:
yield key, index
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self, indices):
super(TestIndex, self).test_copy_and_deepcopy(indices)
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
@pytest.mark.parametrize("attr", ['strIndex', 'dateIndex'])
def test_constructor_regular(self, attr):
# regular instance creation
index = getattr(self, attr)
tm.assert_contains_all(index, index)
def test_constructor_casting(self):
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
def test_constructor_copy(self):
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
assert isinstance(index, Index)
assert index.name == 'name'
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
pytest.raises(TypeError, Index, 0)
@pytest.mark.parametrize("index_vals", [
[('A', 1), 'B'], ['B', ('A', 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
@pytest.mark.parametrize('na_value', [None, np.nan])
@pytest.mark.parametrize('vtype', [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
# GH 18505 : valid tuples containing NaN
values = [(1, 'two'), (3., na_value)]
result = Index(vtype(values))
expected = MultiIndex.from_tuples(values)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize("index", [
pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern', name='Green Eggs & Ham'), # DTI with tz
pd.date_range('2015-01-01 10:00', freq='D', periods=3), # DTI no tz
pd.timedelta_range('1 days', freq='D', periods=3), # td
pd.period_range('2015-01-01', freq='D', periods=3) # period
])
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
result = pd.Index(index.astype(object))
else:
result = pd.Index(index)
tm.assert_index_equal(result, index)
if isinstance(index, pd.DatetimeIndex):
assert result.tz == index.tz
if cast_as_obj:
# GH#23524 check that Index(dti, dtype=object) does not
# incorrectly raise ValueError, and that nanoseconds are not
# dropped
index += pd.Timedelta(nanoseconds=50)
result = pd.Index(index, dtype=object)
assert result.dtype == np.object_
assert list(result) == list(index)
@pytest.mark.parametrize("index,has_tz", [
(pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), True), # datetimetz
(pd.timedelta_range('1 days', freq='D', periods=3), False), # td
(pd.period_range('2015-01-01', freq='D', periods=3), False) # period
])
def test_constructor_from_series_dtlike(self, index, has_tz):
result = pd.Index(pd.Series(index))
tm.assert_index_equal(result, index)
if has_tz:
assert result.tz == index.tz
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
def test_constructor_from_series(self, klass):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = klass(s)
tm.assert_index_equal(result, expected)
def test_constructor_from_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
s = Series(pd.to_datetime(dts))
result = DatetimeIndex(s, freq='MS')
tm.assert_index_equal(result, expected)
def test_constructor_from_frame_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = dts
result = DatetimeIndex(df['date'], freq='MS')
assert df['date'].dtype == object
expected.name = 'date'
tm.assert_index_equal(result, expected)
expected = pd.Series(dts, name='date')
tm.assert_series_equal(df['date'], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df['date'])
assert freq == 'MS'
@pytest.mark.parametrize("array", [
np.arange(5), np.array(['a', 'b', 'c']), date_range(
'2000-01-01', periods=3).values
])
def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dtype', [
int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32',
'uint16', 'uint8'])
def test_constructor_int_dtype_float(self, dtype):
# GH 18400
if is_unsigned_integer_dtype(dtype):
index_type = UInt64Index
else:
index_type = Int64Index
expected = index_type([0, 1, 2, 3])
result = Index([0., 1., 2., 3.], dtype=dtype)
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
expected = Float64Index(data)
result = Index(data, dtype='float')
tm.assert_index_equal(result, expected)
def test_droplevel(self, indices):
# GH 21115
if isinstance(indices, MultiIndex):
# Tested separately in test_multi.py
return
assert indices.droplevel([]).equals(indices)
for level in indices.name, [indices.name]:
if isinstance(indices.name, tuple) and level is indices.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
indices.droplevel(level)
for level in 'wrong', ['wrong']:
with pytest.raises(KeyError):
indices.droplevel(level)
@pytest.mark.parametrize("dtype", ['int64', 'uint64'])
def test_constructor_int_dtype_nan_raises(self, dtype):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with pytest.raises(ValueError, match=msg):
Index(data, dtype=dtype)
@pytest.mark.parametrize("klass,dtype,na_val", [
(pd.Float64Index, np.float64, np.nan),
(pd.DatetimeIndex, 'datetime64[ns]', pd.NaT)
])
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
expected = klass(na_list)
assert expected.dtype == dtype
result = Index(na_list)
tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos", [0, 1])
@pytest.mark.parametrize("klass,dtype,ctor", [
(pd.DatetimeIndex, 'datetime64[ns]', np.datetime64('nat')),
(pd.TimedeltaIndex, 'timedelta64[ns]', np.timedelta64('nat'))
])
def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor,
nulls_fixture):
expected = klass([pd.NaT, pd.NaT])
assert expected.dtype == dtype
data = [ctor]
data.insert(pos, nulls_fixture)
result = Index(data)
tm.assert_index_equal(result, expected)
result = Index(np.array(data, dtype=object))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("swap_objs", [True, False])
def test_index_ctor_nat_result(self, swap_objs):
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
if swap_objs:
data = data[::-1]
expected = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), expected)
tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
assert isinstance(rs, PeriodIndex)
@pytest.mark.parametrize("vals,dtype", [
([1, 2, 3, 4, 5], 'int'), ([1.1, np.nan, 2.2, 3.0], 'float'),
(['A', 'B', 'C', np.nan], 'obj')
])
def test_constructor_simple_new(self, vals, dtype):
index = Index(vals, name=dtype)
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int),
# below should coerce
[1., 2., 3.], np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_int64(self, vals):
index = Index(vals, dtype=int)
assert isinstance(index, Int64Index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], [1., 2., 3.], np.array([1., 2., 3.]),
np.array([1, 2, 3], dtype=int), np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_float64(self, vals):
index = Index(vals, dtype=float)
assert isinstance(index, Float64Index)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
[True, False, True], np.array([True, False, True], dtype=bool)
])
def test_constructor_dtypes_to_object(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=bool)
else:
index = Index(vals)
assert isinstance(index, Index)
assert index.dtype == object
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3], dtype=int),
np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
[datetime(2011, 1, 1), datetime(2011, 1, 2)]
])
def test_constructor_dtypes_to_categorical(self, vals):
index = Index(vals, dtype='category')
assert isinstance(index, CategoricalIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])
])
def test_constructor_dtypes_to_datetime(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, DatetimeIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]),
[timedelta(1), timedelta(1)]
])
def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("attr, utc", [
['values', False],
['asi8', True]])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
klass):
# Test constructing with a datetimetz dtype
# .values produces numpy datetimes, so these are considered naive
# .asi8 produces integers, so these are considered epoch timestamps
index = pd.date_range('2011-01-01', periods=5)
arg = getattr(index, attr)
if utc:
index = index.tz_localize('UTC').tz_convert(tz_naive_fixture)
else:
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
index = pd.timedelta_range('1 days', periods=5)
dtype = index.dtype
values = getattr(index, attr)
result = klass(values, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(values), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("value", [[], iter([]), (x for x in [])])
@pytest.mark.parametrize("klass",
[Index, Float64Index, Int64Index, UInt64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex])
def test_constructor_empty(self, value, klass):
empty = klass(value)
assert isinstance(empty, klass)
assert not len(empty)
@pytest.mark.parametrize("empty,klass", [
(PeriodIndex([], freq='B'), PeriodIndex),
(PeriodIndex(iter([]), freq='B'), PeriodIndex),
(PeriodIndex((x for x in []), freq='B'), PeriodIndex),
(RangeIndex(step=1), pd.RangeIndex),
(MultiIndex(levels=[[1, 2], ['blue', 'red']],
labels=[[], []]), MultiIndex)
])
def test_constructor_empty_special(self, empty, klass):
assert isinstance(empty, klass)
assert not len(empty)
def test_constructor_non_hashable_name(self, indices):
# GH 20527
if isinstance(indices, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [['1']]
# With .rename()
with pytest.raises(TypeError, match=message):
indices.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
indices.set_names(names=renamed)
def test_constructor_overflow_int64(self):
# see gh-15832
msg = ("The elements provided in the data cannot "
"all be casted to the dtype int64")
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
@pytest.mark.xfail(reason="see GH#21311: Index "
"doesn't enforce dtype argument",
strict=True)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
pytest.raises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
assert casted.name == 'foobar'
def test_equals_object(self):
# same
assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
@pytest.mark.parametrize("comp", [
Index(['a', 'b']), Index(['a', 'b', 'd']), ['a', 'b', 'c']])
def test_not_equals_object(self, comp):
assert not Index(['a', 'b', 'c']).equals(comp)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test that equivalent positive and negative insert positions (other than 0 / -1) give the same result
tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_insert_missing(self, nulls_fixture):
# GH 22295
# test there is no mangling of NA values
expected = Index(['a', nulls_fixture, 'b', 'c'])
result = Index(list('abc')).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos,expected", [
(0, Index(['b', 'c', 'd'], name='index')),
(-1, Index(['a', 'b', 'c'], name='index'))
])
def test_delete(self, pos, expected):
index = Index(['a', 'b', 'c', 'd'], name='index')
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_delete_raises(self):
index = Index(['a', 'b', 'c', 'd'], name='index')
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
index.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
assert i1.identical(i2)
i1 = i1.rename('foo')
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
assert ind.is_(ind2)
assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
def test_asof(self):
d = self.dateIndex[0]
assert self.dateIndex.asof(d) == d
assert isna(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
assert self.dateIndex.asof(d + timedelta(1)) == d
d = self.dateIndex[0].to_pydatetime()
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = index.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
expected_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+'
'0000', 'ns')
assert first_value == x[Timestamp(expected_ts)]
@pytest.mark.parametrize("op", [
operator.eq, operator.ne, operator.gt, operator.lt,
operator.ge, operator.le
])
def test_comparators(self, op):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
def test_booleanindex(self):
boolIndex = np.repeat(True, len(self.strIndex)).astype(bool)
boolIndex[5:30:2] = False
subIndex = self.strIndex[boolIndex]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
subIndex = self.strIndex[list(boolIndex)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, attr, dtype):
empty_arr = np.array([], dtype=dtype)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
def test_empty_fancy_raises(self, attr):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
pytest.raises(IndexError, index.__getitem__, empty_farr)
@pytest.mark.parametrize("itm", [101, 'no_int'])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, indices, itm):
with pytest.raises(IndexError):
indices[itm]
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first)
assert inter is first
@pytest.mark.parametrize("index2,keeps_name", [
(Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
(Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
(Index([3, 4, 5, 6, 7]), False)])
def test_intersection_name_preservation(self, index2, keeps_name):
index1 = Index([1, 2, 3, 4, 5], name='index')
expected = Index([3, 4, 5])
result = index1.intersection(index2)
if keeps_name:
expected.name = 'index'
assert result.name == expected.name
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("first_name,second_name,expected_name", [
('A', 'A', 'A'), ('A', 'B', None), (None, 'B', None)])
def test_intersection_name_preservation2(self, first_name, second_name,
expected_name):
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second)
assert intersect.name == expected_name
@pytest.mark.parametrize("index2,keeps_name", [
(Index([4, 7, 6, 5, 3], name='index'), True),
(Index([4, 7, 6, 5, 3], name='other'), False)])
def test_intersection_monotonic(self, index2, keeps_name):
index1 = Index([5, 3, 2, 4, 1], name='index')
expected = Index([5, 3, 4])
if keeps_name:
expected.name = "index"
result = index1.intersection(index2)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index2,expected_arr", [
(Index(['B', 'D']), ['B']),
(Index(['B', 'D', 'A']), ['A', 'B', 'A'])])
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr):
# non-monotonic non-unique
index1 = Index(['A', 'B', 'A', 'C'])
expected = Index(expected_arr, dtype='object')
result = index1.intersection(index2)
tm.assert_index_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
result = i2.intersection(i1)
assert len(result) == 0
@pytest.mark.parametrize(
'fname, sname, expected_name',
[
('A', 'A', 'A'),
('A', 'B', None),
('A', None, None),
(None, 'B', None),
(None, None, None),
])
def test_corner_union(self, indices, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(indices, MultiIndex) or not indices.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = indices.copy().set_names(fname)
second = indices.copy().set_names(sname)
union = first.union(second)
expected = indices.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = indices.copy().set_names(fname)
second = indices.drop(indices).set_names(sname)
union = first.union(second)
expected = indices.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = indices.drop(indices).set_names(fname)
second = indices.copy().set_names(sname)
union = first.union(second)
expected = indices.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = indices.drop(indices).set_names(fname)
second = indices.drop(indices).set_names(sname)
union = first.union(second)
expected = indices.drop(indices).set_names(expected_name)
tm.assert_index_equal(union, expected)
def test_chained_union(self):
# Chained unions handles names correctly
i1 = Index([1, 2], name='i1')
i2 = Index([3, 4], name='i2')
i3 = Index([5, 6], name='i3')
union = i1.union(i2.union(i3))
expected = i1.union(i2).union(i3)
tm.assert_index_equal(union, expected)
j1 = Index([1, 2], name='j1')
j2 = Index([], name='j2')
j3 = Index([], name='j3')
union = j1.union(j2.union(j3))
expected = j1.union(j2).union(j3)
tm.assert_index_equal(union, expected)
def test_union(self):
# TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
assert tm.equalContents(union, everything)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_from_iterables(self, klass):
# GH 10149
# TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
case = klass(second.values)
result = first.union(case)
assert tm.equalContents(result, everything)
def test_union_identity(self):
# TODO: replace with fixture
first = self.strIndex[5:20]
union = first.union(first)
assert union is first
union = first.union([])
assert union is first
union = Index([]).union(first)
assert union is first
@pytest.mark.parametrize("first_list", [list('ab'), list()])
@pytest.mark.parametrize("second_list", [list('ab'), list()])
@pytest.mark.parametrize("first_name, second_name, expected_name", [
('A', 'B', None), (None, 'B', None), ('A', None, None)])
def test_union_name_preservation(self, first_list, second_list, first_name,
second_name, expected_name):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second)
vals = sorted(set(first_list).union(second_list))
expected = Index(vals, name=expected_name)
tm.assert_index_equal(union, expected)
def test_union_dt_as_obj(self):
# TODO: Replace with fixture
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
assert tm.equalContents(firstCat, appended)
assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
index = self.strIndex
expected = Index(self.strIndex.values * 2)
tm.assert_index_equal(index + index, expected)
tm.assert_index_equal(index + index.tolist(), expected)
tm.assert_index_equal(index.tolist() + index, expected)
# test add and radd
index = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
tm.assert_index_equal(index + '1', expected)
expected = Index(['1a', '1b', '1c'])
tm.assert_index_equal('1' + index, expected)
def test_sub_fail(self):
index = self.strIndex
pytest.raises(TypeError, lambda: index - 'a')
pytest.raises(TypeError, lambda: index - index)
pytest.raises(TypeError, lambda: index - index.tolist())
pytest.raises(TypeError, lambda: index.tolist() - index)
def test_sub_object(self):
# GH#19369
index = pd.Index([Decimal(1), Decimal(2)])
expected = pd.Index([Decimal(0), Decimal(1)])
result = index - Decimal(1)
tm.assert_index_equal(result, expected)
result = index - pd.Index([Decimal(1), Decimal(1)])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
index - 'foo'
with pytest.raises(TypeError):
index - np.array([2, 'foo'])
def test_rsub_object(self):
# GH#19369
index = pd.Index([Decimal(1), Decimal(2)])
expected = pd.Index([Decimal(1), Decimal(0)])
result = Decimal(2) - index
tm.assert_index_equal(result, expected)
result = np.array([Decimal(2), Decimal(2)]) - index
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
'foo' - index
with pytest.raises(TypeError):
np.array([True, pd.Timestamp.now()]) - index
def test_map_identity_mapping(self):
# GH 12766
# TODO: replace with fixture
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
result = index.map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize("attr", [
'makeDateIndex', 'makePeriodIndex', 'makeTimedeltaIndex'])
def test_map_tseries_indices_return_index(self, attr):
index = getattr(tm, attr)(10)
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
expected = Index(range(24), name='hourly')
tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
# GH 12756
expected = Index(['foo', 'bar', 'baz'])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
# TODO: replace with fixture
for name in self.indices.keys():
if name == 'catIndex':
# Tested in test_categorical
continue
elif name == 'repeats':
# Cannot map duplicated index
continue
index = self.indices[name]
expected = Index(np.arange(len(index), 0, -1))
# to match proper result coercion for uints
if name == 'empty':
expected = Index([])
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("mapper", [
Series(['foo', 2., 'baz'], index=[0, 2, -1]),
{0: 'foo', 2: 2.0, -1: 'baz'}])
def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2., np.nan, 'foo'])
result = Index([2, 1, 0]).map(mapper)
tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
index = Index([1.5, np.nan, 3, np.nan, 5])
result = index.map(lambda x: x * 2, na_action='ignore')
expected = index * 2
tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
index = Index([1, 2, 3])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = index.map(default_dict)
expected = Index(['stuff', 'blank', 'blank'])
tm.assert_index_equal(result, expected)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("name,expected", [
('foo', 'foo'), ('bar', None)])
def test_append_empty_preserve_name(self, name, expected):
left = Index([], name='foo')
right = Index([1, 2, 3], name=name)
result = left.append(right)
assert result.name == expected
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
assert 'a' not in index2
assert 'afoo' in index2
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
assert 'a' in index
index += '_x'
assert 'a_x' in index
@pytest.mark.parametrize("second_name,expected", [
(None, None), ('name', 'name')])
def test_difference_name_preservation(self, second_name, expected):
# TODO: replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
second.name = second_name
result = first.difference(second)
assert tm.equalContents(result, answer)
if expected is None:
assert result.name is None
else:
assert result.name == expected
def test_difference_empty_arg(self):
first = self.strIndex[5:20]
first.name = 'name'
result = first.difference([])
assert tm.equalContents(result, first)
assert result.name == first.name
def test_difference_identity(self):
first = self.strIndex[5:20]
first.name = 'name'
result = first.difference(first)
assert len(result) == 0
assert result.name == first.name
def test_symmetric_difference(self):
# smoke
index1 = Index([1, 2, 3, 4], name='index1')
index2 = Index([2, 3, 4, 5])
result = index1.symmetric_difference(index2)
expected = Index([1, 5])
assert tm.equalContents(result, expected)
assert result.name is None
# __xor__ syntax
expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
def test_symmetric_difference_mi(self):
index1 = MultiIndex.from_tuples(self.tuples)
index2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = index1.symmetric_difference(index2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
assert tm.equalContents(result, expected)
@pytest.mark.parametrize("index2,expected", [
(Index([0, 1, np.nan]), Index([0.0, 2.0, 3.0])),
(Index([0, 1]), Index([0.0, 2.0, 3.0, np.nan]))])
def test_symmetric_difference_missing(self, index2, expected):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
index1 = Index([1, np.nan, 2, 3])
result = index1.symmetric_difference(index2)
tm.assert_index_equal(result, expected)
def test_symmetric_difference_non_index(self):
index1 = Index([1, 2, 3, 4], name='index1')
index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = index1.symmetric_difference(index2)
assert tm.equalContents(result, expected)
assert result.name == 'index1'
result = index1.symmetric_difference(index2, result_name='new_name')
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
def test_difference_type(self):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
result = index.difference(index)
expected = index.drop(index)
tm.assert_index_equal(result, expected)
def test_intersection_difference(self):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
inter = index.intersection(index.drop(index))
diff = index.difference(index)
tm.assert_index_equal(inter, diff)
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', True), ('dateIndex', False), ('floatIndex', True)])
def test_is_numeric(self, attr, expected):
assert getattr(self, attr).is_numeric() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', True), ('boolIndex', True), ('catIndex', False),
('intIndex', False), ('dateIndex', False), ('floatIndex', False)])
def test_is_object(self, attr, expected):
assert getattr(self, attr).is_object() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', False), ('dateIndex', True), ('floatIndex', False)])
def test_is_all_dates(self, attr, expected):
assert getattr(self, attr).is_all_dates == expected
def test_summary(self):
self._check_method_works(Index._summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind._summary()
# shouldn't be formatted accidentally.
assert '~:{range}:0' in result
assert '{other}%s' in result
# GH18217
def test_summary_deprecated(self):
ind = Index(['{other}%s', "~:{range}:0"], name='A')
with tm.assert_produces_warning(FutureWarning):
ind.summary()
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
# windows has different precision on datetime.datetime.now (it doesn't
# include microseconds); the default Timestamp repr shows these but Index
# formatting does not, so we skip that case
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
self.strIndex[:0].format()
@pytest.mark.parametrize("vals", [
[1, 2.0 + 3.0j, 4.], ['a', 'b', 'c']])
def test_format_missing(self, vals, nulls_fixture):
# 2845
vals = list(vals) # Copy for each iteration
vals.append(nulls_fixture)
index = Index(vals)
formatted = index.format()
expected = [str(index[0]), str(index[1]), str(index[2]), u('NaN')]
assert formatted == expected
assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
assert formatted[0] == 'something'
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
assert len(result) == 2
assert result == expected
@pytest.mark.parametrize("op", ['any', 'all'])
def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method):
# TODO: make this a dedicated test with parametrized methods
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("expected,method", [
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'pad'),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'ffill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'backfill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'bfill')])
def test_get_indexer_methods(self, reverse, expected, method):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with pytest.raises(ValueError, match='tolerance argument'):
index.get_indexer([1, 0], tolerance=1)
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
'method, tolerance, indexer, expected',
[
('pad', None, [0, 5, 9], [0, 5, 9]),
('backfill', None, [0, 5, 9], [0, 5, 9]),
('nearest', None, [0, 5, 9], [0, 5, 9]),
('pad', 0, [0, 5, 9], [0, 5, 9]),
('backfill', 0, [0, 5, 9], [0, 5, 9]),
('nearest', 0, [0, 5, 9], [0, 5, 9]),
('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
@pytest.mark.parametrize('listtype', [list, tuple, Series, np.array])
@pytest.mark.parametrize(
'tolerance, expected',
list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1],
[0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1],
[-1, 2, 9]])))
def test_get_indexer_nearest_listlike_tolerance(self, tolerance,
expected, listtype):
index = Index(np.arange(10))
actual = index.get_indexer([0.2, 1.8, 8.5], method='nearest',
tolerance=listtype(tolerance))
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], method='nearest', limit=1)
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_indexer([1, 0], method='nearest',
tolerance=[1, 2, 3])
@pytest.mark.parametrize("method,expected", [
('pad', [8, 7, 0]), ('backfill', [9, 8, 1]), ('nearest', [9, 7, 0])])
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("method,expected", [
('pad', np.array([-1, 0, 1, 1], dtype=np.intp)),
('backfill', np.array([0, 0, 1, -1], dtype=np.intp))])
def test_get_indexer_strings(self, method, expected):
index = pd.Index(['b', 'c'])
actual = index.get_indexer(['a', 'b', 'c', 'd'], method=method)
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings_raises(self):
index = pd.Index(['b', 'c'])
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad',
tolerance=[2, 2, 2, 2])
def test_get_indexer_numeric_index_boolean_target(self):
# GH 16877
numeric_index = pd.Index(range(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_with_NA_values(self, unique_nulls_fixture,
unique_nulls_fixture2):
# GH 22332
# check pairwise, that no pair of na values
# is mangled
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values are not unique
arr = np.array([unique_nulls_fixture,
unique_nulls_fixture2], dtype=np.object)
index = pd.Index(arr, dtype=np.object)
result = index.get_indexer([unique_nulls_fixture,
unique_nulls_fixture2, 'Unknown'])
expected = np.array([0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc(self, method):
index = pd.Index([0, 1, 2])
assert index.get_loc(1, method=method) == 1
if method:
assert index.get_loc(1, method=method, tolerance=0) == 1
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc_raises_bad_label(self, method):
index = pd.Index([0, 1, 2])
if method:
# Messages vary across versions
if PY36:
msg = 'not supported between'
elif PY35:
msg = 'unorderable types'
else:
if method == 'nearest':
msg = 'unsupported operand'
else:
msg = 'requires scalar valued input'
else:
msg = 'invalid key'
with pytest.raises(TypeError, match=msg):
index.get_loc([1, 2], method=method)
@pytest.mark.parametrize("method,loc", [
('pad', 1), ('backfill', 2), ('nearest', 1)])
def test_get_loc_tolerance(self, method, loc):
index = pd.Index([0, 1, 2])
assert index.get_loc(1.1, method) == loc
assert index.get_loc(1.1, method, tolerance=1) == loc
@pytest.mark.parametrize("method", ['pad', 'backfill', 'nearest'])
def test_get_loc_outside_tolerance_raises(self, method):
index = pd.Index([0, 1, 2])
with pytest.raises(KeyError, match='1.1'):
index.get_loc(1.1, method, tolerance=0.05)
def test_get_loc_bad_tolerance_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='must be numeric'):
index.get_loc(1.1, 'nearest', tolerance='invalid')
def test_get_loc_tolerance_no_method_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance .* valid if'):
index.get_loc(1.1, tolerance=1)
def test_get_loc_raises_missized_tolerance(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_loc(1.1, 'nearest', tolerance=[1, 1])
def test_get_loc_raises_object_nearest(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='nearest')
def test_get_loc_raises_object_tolerance(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='pad', tolerance='invalid')
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
def test_slice_float_locs(self):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
@pytest.mark.xfail(reason="Assertions were not correct - see GH#20915",
strict=True)
def test_slice_ints_with_floats_raises(self):
# int slicing with floats
# GH 4892, these are all TypeErrors
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
n = len(index)
pytest.raises(TypeError,
lambda: index.slice_locs(5.0, 10.0))
pytest.raises(TypeError,
lambda: index.slice_locs(4.5, 10.5))
index2 = index[::-1]
pytest.raises(TypeError,
lambda: index2.slice_locs(8.5, 1.5), (2, 6))
pytest.raises(TypeError,
lambda: index2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
index = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert index.slice_locs('a', 'd') == (0, 6)
assert index.slice_locs(end='d') == (0, 6)
assert index.slice_locs('a', 'c') == (0, 4)
assert index.slice_locs('b', 'd') == (2, 6)
index2 = index[::-1]
assert index2.slice_locs('d', 'a') == (0, 6)
assert index2.slice_locs(end='a') == (0, 6)
assert index2.slice_locs('d', 'b') == (0, 4)
assert index2.slice_locs('c', 'a') == (2, 6)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with pytest.raises(KeyError, match=''):
index.slice_locs(start=1.5)
with pytest.raises(KeyError, match=''):
index.slice_locs(end=1.5)
@pytest.mark.parametrize("in_slice,expected", [
(pd.IndexSlice[::-1], 'yxdcb'), (pd.IndexSlice['b':'y':-1], ''),
(pd.IndexSlice['b'::-1], 'b'), (pd.IndexSlice[:'b':-1], 'yxdcb'),
(pd.IndexSlice[:'y':-1], 'y'), (pd.IndexSlice['y'::-1], 'yxdcb'),
(pd.IndexSlice['y'::-4], 'yb'),
# absent labels
(pd.IndexSlice[:'a':-1], 'yxdcb'), (pd.IndexSlice[:'a':-2], 'ydb'),
(pd.IndexSlice['z'::-1], 'yxdcb'), (pd.IndexSlice['z'::-3], 'yc'),
(pd.IndexSlice['m'::-1], 'dcb'), (pd.IndexSlice[:'m':-1], 'yx'),
(pd.IndexSlice['a':'a':-1], ''), (pd.IndexSlice['z':'z':-1], ''),
(pd.IndexSlice['m':'m':-1], '')
])
def test_slice_locs_negative_step(self, in_slice, expected):
index = Index(list('bcdxy'))
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = index[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
def test_drop_by_str_label(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("keys", [['foo', 'bar'], ['1', 'bar']])
def test_drop_by_str_label_raises_missing_keys(self, keys):
with pytest.raises(KeyError, match=''):
self.strIndex.drop(keys)
def test_drop_by_str_label_errors_ignore(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
# errors='ignore'
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
# TODO: Parametrize numeric and str tests after self.strIndex fixture
index = Index([1, 2, 3])
dropped = index.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_raises_missing_keys(self):
index = Index([1, 2, 3])
with pytest.raises(KeyError, match=''):
index.drop([3, 4])
@pytest.mark.parametrize("key,expected", [
(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))])
def test_drop_by_numeric_label_errors_ignore(self, key, expected):
index = Index([1, 2, 3])
dropped = index.drop(key, errors='ignore')
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("values", [['a', 'b', ('c', 'd')],
['a', ('c', 'd'), 'b'],
[('c', 'd'), 'a', 'b']])
@pytest.mark.parametrize("to_drop", [[('c', 'd'), 'a'], ['a', ('c', 'd')]])
def test_drop_tuple(self, values, to_drop):
# GH 18304
index = pd.Index(values)
expected = pd.Index(['b'])
result = index.drop(to_drop)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[0])
for drop_me in to_drop[1], [to_drop[1]]:
result = removed.drop(drop_me)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
for drop_me in to_drop[1], [to_drop[1]]:
pytest.raises(KeyError, removed.drop, drop_me)
@pytest.mark.parametrize("method,expected", [
('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])),
('union', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'),
(2, 'C')], dtype=[('num', int), ('let', 'a1')]))
])
def test_tuple_union_bug(self, method, expected):
index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]))
index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')]))
result = getattr(index1, method)(index2)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("attr", [
'is_monotonic_increasing', 'is_monotonic_decreasing',
'_is_strictly_monotonic_increasing',
'_is_strictly_monotonic_decreasing'])
def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
def test_get_set_value(self):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
assert values[67] == 10
@pytest.mark.parametrize("values", [
['foo', 'bar', 'quux'], {'foo', 'bar', 'quux'}])
@pytest.mark.parametrize("index,expected", [
(Index(['qux', 'baz', 'foo', 'bar']),
np.array([False, False, True, True])),
(Index([]), np.array([], dtype=bool)) # empty
])
def test_isin(self, values, index, expected):
result = index.isin(values)
tm.assert_numpy_array_equal(result, expected)
def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
# all nans are the same
if (isinstance(nulls_fixture, float) and
isinstance(nulls_fixture2, float) and
math.isnan(nulls_fixture) and
math.isnan(nulls_fixture2)):
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
else:
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, False]))
def test_isin_nan_common_float64(self, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[np.nan]), np.array([False, True]))
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[pd.NaT]), np.array([False, False]))
@pytest.mark.parametrize("level", [0, -1])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg(self, level, index):
values = index.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
index.name = 'foobar'
tm.assert_numpy_array_equal(expected,
index.isin(values, level='foobar'))
@pytest.mark.parametrize("level", [1, 10, -2])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_bad_index(self, level, index):
with pytest.raises(IndexError, match='Too many levels'):
index.isin([], level=level)
@pytest.mark.parametrize("level", [1.0, 'foobar', 'xyzzy', np.nan])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_key(self, level, index):
with pytest.raises(KeyError, match='must be same as name'):
index.isin([], level=level)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
index = Index(["a", "b"])
expected = np.array([False, False])
result = index.isin(empty)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("values", [
[1, 2, 3, 4],
[1., 2., 3., 4.],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
pd.date_range('2018-01-01', freq='D', periods=4)])
def test_boolean_cmp(self, values):
index = Index(values)
result = (index == values)
expected = np.array([True, True, True, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("name,level", [
(None, 0), ('a', 'a')])
def test_get_level_values(self, name, level):
expected = self.strIndex.copy()
if name:
expected.name = name
result = expected.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_slice_keep_name(self):
index = Index(['a', 'b'], name='asdf')
assert index.name == index[1:].name
# instance attributes of the form self.<name>Index
@pytest.mark.parametrize('index_kind',
['unicode', 'str', 'date', 'int', 'float'])
def test_join_self(self, join_type, index_kind):
res = getattr(self, '{0}Index'.format(index_kind))
joined = res.join(res, how=join_type)
assert res is joined
@pytest.mark.parametrize("method", ['strip', 'rstrip', 'lstrip'])
def test_str_attribute(self, method):
# GH9068
index = Index([' jack', 'jill ', ' jesse ', 'frank'])
expected = Index([getattr(str, method)(x) for x in index.values])
result = getattr(index.str, method)()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
PeriodIndex(start='2000', end='2010', freq='A')])
def test_str_attribute_raises(self, index):
with pytest.raises(AttributeError, match='only use .str accessor'):
index.str.repeat(2)
@pytest.mark.parametrize("expand,expected", [
(None, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(False, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(True, MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)]))])
def test_str_split(self, expand, expected):
index = Index(['a b c', 'd e', 'f'])
if expand is not None:
result = index.str.split(expand=expand)
else:
result = index.str.split()
tm.assert_index_equal(result, expected)
def test_str_bool_return(self):
# test boolean case, should return np.array instead of boolean Index
index = Index(['a1', 'a2', 'b1', 'b2'])
result = index.str.startswith('a')
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
def test_str_bool_series_indexing(self):
index = Index(['a1', 'a2', 'b1', 'b2'])
s = Series(range(4), index=index)
result = s[s.index.str.startswith('a')]
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index,expected", [
(Index(list('abcd')), True), (Index(range(4)), False)])
def test_tab_completion(self, index, expected):
# GH 9910
result = 'str' in dir(index)
assert result == expected
def test_indexing_doesnt_change_class(self):
index = Index([1, 2, 3, 'a', 'b', 'c'])
assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_index = Index(np.random.permutation(15))
right_index = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how='outer')
# right_index in this case because DatetimeIndex has join precedence
# over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_index.astype(object).union(
left_index.astype(object))
tm.assert_index_equal(result, expected)
def test_nan_first_take_datetime(self):
index = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
result = index.take([-1, 0, 1])
expected = Index([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
def test_take_fill_value(self):
# GH 12631
index = pd.Index(list('ABC'), name='xxx')
result = index.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = index.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = index.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
def test_take_fill_value_none_raises(self):
index = pd.Index(list('ABC'), name='xxx')
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -5]), fill_value=True)
def test_take_bad_bounds_raises(self):
index = pd.Index(list('ABC'), name='xxx')
with pytest.raises(IndexError, match='out of bounds'):
index.take(np.array([1, -5]))
@pytest.mark.parametrize("name", [None, 'foobar'])
@pytest.mark.parametrize("labels", [
[], np.array([]), ['A', 'B', 'C'], ['C', 'B', 'A'],
np.array(['A', 'B', 'C']), np.array(['C', 'B', 'A']),
# Must preserve name even if dtype changes
pd.date_range('20130101', periods=3).values,
pd.date_range('20130101', periods=3).tolist()])
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name,
labels):
# GH6552
index = pd.Index([0, 1, 2])
index.name = name
assert index.reindex(labels)[0].name == name
@pytest.mark.parametrize("labels", [
[], np.array([]), np.array([], dtype=np.int64)])
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self,
labels):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == np.object_
@pytest.mark.parametrize("labels,dtype", [
(pd.Int64Index([]), np.int64),
(pd.Float64Index([]), np.float64),
(pd.DatetimeIndex([]), np.datetime64)])
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self,
labels,
dtype):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
index = pd.Index(list('abc'))
result = index.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
assert result.levels[0].dtype.type == np.int64
assert result.levels[1].dtype.type == np.float64
def test_groupby(self):
index = Index(range(5))
result = index.groupby(np.array([1, 1, 2, 2, 2]))
expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize("mi,expected", [
(MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
(MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False]))])
def test_equals_op_multiindex(self, mi, expected):
# GH9785
# test comparisons of multiindex
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == mi
tm.assert_numpy_array_equal(result, expected)
def test_equals_op_multiindex_identify(self):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == df.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", [
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
Index(['foo', 'bar', 'baz'])])
def test_equals_op_mismatched_multiindex_raises(self, index):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
with pytest.raises(ValueError, match="Lengths must match"):
df.index == index
def test_equals_op_index_vs_mi_same_length(self):
mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
index = Index(['foo', 'bar', 'baz'])
result = mi == index
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_conv", [
pd.to_datetime, pd.to_timedelta])
def test_dt_conversion_preserves_name(self, dt_conv):
# GH 10875
index = pd.Index(['01:02:03', '01:02:04'], name='label')
assert index.name == dt_conv(index).name
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index(['a', 'bb', 'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_compat(self, index, expected):
result = unicode(index) # noqa
assert result == expected
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option_compat(self, index,
expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = unicode(index) # noqa
assert result == expected
@pytest.mark.parametrize('dtype', [np.int64, np.float64])
@pytest.mark.parametrize('delta', [1, 0, -1])
def test_addsub_arithmetic(self, dtype, delta):
# GH 8142
delta = dtype(delta)
index = pd.Index([10, 11, 12], dtype=dtype)
result = index + delta
expected = pd.Index(index.values + delta, dtype=dtype)
tm.assert_index_equal(result, expected)
# this subtraction used to fail
result = index - delta
expected = pd.Index(index.values - delta, dtype=dtype)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index + index, 2 * index)
tm.assert_index_equal(index - index, 0 * index)
assert not (index - index).empty
def test_iadd_preserves_name(self):
# GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
ser = pd.Series([1, 2, 3])
ser.index.name = 'foo'
ser.index += 1
assert ser.index.name == "foo"
ser.index -= 1
assert ser.index.name == "foo"
def test_cached_properties_not_settable(self):
index = pd.Index([1, 2, 3])
with pytest.raises(AttributeError, match="Can't set attribute"):
index.is_unique = False
def test_get_duplicates_deprecated(self):
index = pd.Index([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
index.get_duplicates()
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('idx.', 4))
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
_holder = Index
def setup_method(self, method):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
result = index.argsort()
elif PY3:
with pytest.raises(TypeError, match="unorderable types"):
result = index.argsort()
else:
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
result = np.argsort(index)
elif PY3:
with pytest.raises(TypeError, match="unorderable types"):
result = np.argsort(index)
else:
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = self.create_index()
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
warning_type = RuntimeWarning if PY3 else None
with tm.assert_produces_warning(warning_type):
# Python 3: Unorderable types
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = pd.Index([1, 2], name='MyName')
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name='NewName')
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == 'MyName'
assert index2.name == 'NewName'
index3 = index.copy(names=['NewName'])
tm.assert_index_equal(index, index3, check_names=False)
assert index.name == 'MyName'
assert index.names == ['MyName']
assert index3.name == 'NewName'
assert index3.names == ['NewName']
def test_union_base(self):
index = self.create_index()
first = index[3:]
second = index[:5]
if PY3:
# unorderable types
warn_type = RuntimeWarning
else:
warn_type = None
with tm.assert_produces_warning(warn_type):
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[3:]
second = index[:5]
if PY3:
# unorderable types
warn_type = RuntimeWarning
else:
warn_type = None
with tm.assert_produces_warning(warn_type):
result = first.union(klass(second.values))
assert tm.equalContents(result, index)
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(second)
expected = Index([0, 'a', 1])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_intersection_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(klass(second.values))
assert tm.equalContents(result, second)
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.difference(second)
expected = Index([0, 1, 'a'])
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("dtype", [
None, object, 'category'])
@pytest.mark.parametrize("vals,expected", [
([1, 2, 3], [1, 2, 3]), ([1., 2., 3.], [1., 2., 3.]),
([1., 2., np.nan, 3.], [1., 2., 3.]),
(['A', 'B', 'C'], ['A', 'B', 'C']),
(['A', np.nan, 'B', 'C'], ['A', 'B', 'C'])])
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = pd.Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = pd.Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("index,expected", [
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', pd.NaT]),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.TimedeltaIndex(['1 days', '2 days', '3 days']),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')),
(pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'))])
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index(self):
result = _get_combined_index([])
expected = Index([])
tm.assert_index_equal(result, expected)
def test_repeat(self):
repeats = 2
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
pd.Index([np.nan]), pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT'])])
def test_is_monotonic_na(self, index):
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
result = repr(pd.Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(['a', 'b', 'c'], name=0)
result = klass(lrange(3), index=index)
assert '0' in repr(result)
def test_print_unicode_columns(self):
df = pd.DataFrame({u("\u05d0"): [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
@pytest.mark.parametrize("func,compat_func", [
(str, text_type), # unicode string
(bytes, str) # byte string
])
def test_with_unicode(self, func, compat_func):
index = Index(lrange(1000))
if PY3:
func(index)
else:
compat_func(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
index1 = Index(dt_dates, dtype=object)
index2 = Index(['aa'], dtype=object)
result = index2.intersection(index1)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.gt, operator.ge,
operator.lt, operator.le])
def test_comparison_tzawareness_compat(self, op):
# GH#18162
dr = pd.date_range('2016-01-01', periods=6)
dz = dr.tz_localize('US/Pacific')
        # Mixing tz-aware and tz-naive values should raise; aware-aware and
        # naive-naive comparisons do not raise
naive_series = Series(dr)
aware_series = Series(dz)
with pytest.raises(TypeError):
op(dz, naive_series)
with pytest.raises(TypeError):
op(dr, aware_series)
# TODO: implement _assert_tzawareness_compat for the reverse
# comparison with the Series on the left-hand side
class TestIndexUtils(object):
@pytest.mark.parametrize('data, names, expected', [
([[1, 2, 3]], None, Index([1, 2, 3])),
([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')),
([['a', 'a'], ['c', 'd']], None,
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])),
([['a', 'a'], ['c', 'd']], ['L1', 'L2'],
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]],
names=['L1', 'L2'])),
])
def test_ensure_index_from_sequences(self, data, names, expected):
result = ensure_index_from_sequences(data, names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt',
'add', 'radd', 'sub', 'rsub',
'mul', 'rmul', 'truediv', 'rtruediv',
'floordiv', 'rfloordiv',
'pow', 'rpow', 'mod', 'divmod'])
def test_generated_op_names(opname, indices):
index = indices
if isinstance(index, ABCIndex) and opname == 'rsub':
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = '__{name}__'.format(name=opname)
method = getattr(index, opname)
assert method.__name__ == opname
@pytest.mark.parametrize('index_maker', tm.index_subclass_makers_generator())
def test_index_subclass_constructor_wrong_kwargs(index_maker):
# GH #19348
with pytest.raises(TypeError, match='unexpected keyword argument'):
index_maker(foo='bar')
def test_deprecated_fastpath():
with tm.assert_produces_warning(FutureWarning):
idx = pd.Index(
np.array(['a', 'b'], dtype=object), name='test', fastpath=True)
expected = pd.Index(['a', 'b'], name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.Int64Index(
np.array([1, 2, 3], dtype='int64'), name='test', fastpath=True)
expected = pd.Index([1, 2, 3], name='test', dtype='int64')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.RangeIndex(0, 5, 2, name='test', fastpath=True)
expected = pd.RangeIndex(0, 5, 2, name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.CategoricalIndex(['a', 'b', 'c'], name='test', fastpath=True)
expected = pd.CategoricalIndex(['a', 'b', 'c'], name='test')
tm.assert_index_equal(idx, expected)
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
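# Illustrative note (not from the upstream module): comb2 counts unordered
# sample pairs, which is the quantity the adjusted Rand index below is built
# on. For instance, 4 samples yield 6 possible pairs.
#
#   >>> comb2(4)
#   6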
def check_clusterings(labels_true, labels_pred):
    """Check that the two clusterings are matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
    """Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
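# Illustrative sketch (not from the upstream module): a toy contingency
# matrix. Entry (i, j) counts samples with true class i and predicted
# cluster j; the array repr below assumes numpy's default formatting.
#
#   >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#   array([[1, 1],
#          [0, 2]])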
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even after permuting the
    labels::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
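# Worked example (illustrative only, mirroring the 0.57... doctest above):
# for labels_true=[0, 0, 1, 2] and labels_pred=[0, 0, 1, 1] the contingency
# matrix is [[2, 0], [0, 1], [0, 1]], giving
#   sum_comb_c = comb2(2) + comb2(1) + comb2(1) = 1
#   sum_comb_k = comb2(2) + comb2(2)            = 2
#   sum_comb   = comb2(2)                       = 1
#   prod_comb  = 1 * 2 / comb(4, 2) = 1 / 3,  mean_comb = (1 + 2) / 2 = 1.5
#   ARI = (1 - 1/3) / (1.5 - 1/3) = 4/7, i.e. about 0.571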
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of the ground
    truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
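# Illustrative check (not from the upstream module): the third returned value
# is the harmonic mean of the first two whenever their sum is non-zero.
#
#   >>> h, c, v = homogeneity_completeness_v_measure([0, 0, 1, 2], [0, 0, 1, 1])
#   >>> abs(v - 2 * h * c / (h + c)) < 1e-12
#   True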
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
    Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
    Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
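# Sanity-check sketch (illustrative only): the mutual information of a
# labeling with itself equals its entropy; `entropy` is defined at the
# bottom of this module.
#
#   >>> labels = [0, 0, 1, 1, 2]
#   >>> abs(mutual_info_score(labels, labels) - entropy(labels)) < 1e-10
#   True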
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper bound of 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings)
       have an expected AMI around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
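# Illustrative sanity check (not part of the original module): identical
# labellings give adjusted_mutual_info_score(a, a) == 1.0 exactly, while two
# independent random labellings, e.g.
#     rng = np.random.RandomState(0)
#     a, b = rng.randint(0, 4, 200), rng.randint(0, 4, 200)
#     adjusted_mutual_info_score(a, b)
# should land close to 0 (possibly slightly negative), unlike the unadjusted
# mutual_info_score, which stays non-negative for any pair of labellings.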
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.
    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    nmi : float
       Score between 0.0 and 1.0; 1.0 stands for a perfectly complete labeling.
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
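# Worked example for entropy(): four equally frequent labels give
# -sum((1/4) * (log(1) - log(4))) over the four classes = log(4) ~= 1.386,
# i.e. the natural-log entropy of a uniform distribution over four classes.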
| bsd-3-clause |
pakodekker/oceansar | oceansar/radarsim/skim_raw.py | 1 | 35022 | #!/usr/bin/env python
""" ========================================
sar_raw: SAR Raw data Generator (:mod:`srg`)
========================================
Script to compute SAR Raw data from an ocean surface.
**Arguments**
* -c, --config_file: Configuration file
* -o, --output_file: Output file
* -oc, --ocean_file: Ocean output file
* [-ro, --reuse_ocean_file]: Reuse ocean file if it exists
* [-er, --errors_file]: Errors file (only if system errors model is activated)
* [-re, --reuse_errors_file]: Reuse errors file if it exists
.. note::
This script MUST be run using MPI!
Example (4 cores machine)::
mpiexec -np 4 python sar_raw.py -c config.par -o raw_data.nc -oc ocean_state.nc -ro -er errors_file.nc -re
"""
from mpi4py import MPI
import os
import time
import argparse
import numpy as np
from matplotlib import pyplot as plt
from scipy import linalg
import numexpr as ne
import datetime
from oceansar import utils
from oceansar import ocs_io as tpio
from oceansar.utils import geometry as geosar
from oceansar.radarsim.antenna import sinc_1tx_nrx
from oceansar import constants as const
from oceansar import nrcs as rcs
from oceansar import closure
from oceansar.radarsim import range_profile as raw
from oceansar.surfaces import OceanSurface, OceanSurfaceBalancer
from oceansar.swell_spec import dir_swell_spec as s_spec
def upsample_and_dopplerize(ssraw, dop, n_up, prf):
"""
:param ssraw: data
:param dop: Geomeytric Dopper
:param n_up: Upsampling factor
:param prf: PRF
:return:
"""
# FIXME
# We should add global (mean) range cell migration here
# The azimuth dependent part is handled in the main loop of the code
# The range dependent part of the rcm is also not added.
dims = ssraw.shape
print(n_up)
out = np.zeros((dims[0] * int(n_up), dims[2]), dtype=np.complex64)
# tmp = np.zeros([raw.shape[0] * int(n_up), raw.shape[2]], dtype=np.complex)
t = (np.arange(dims[0] * int(n_up)) / prf).reshape((dims[0] * int(n_up), 1))
t2pi = t * (np.pi * 2)
for ind in range(dims[1]):
raw_zp = np.zeros((dims[0] * int(n_up), dims[2]), dtype=np.complex64)
raw_zp[0:dims[0]] = np.fft.fftshift(np.fft.fft(ssraw[:, ind, :], axis=0), axes=(0,))
raw_zp = np.roll(raw_zp, int(-dims[0]/2), axis=0)
raw_zp = np.conj(np.fft.fft(np.conj(raw_zp), axis=0)) / dims[0]
dop_phase = t2pi * (dop[ind]).reshape((1, dims[2]))
out = out + raw_zp * np.exp(1j * dop_phase)
# out = np.zeros([ssraw.shape[0], int(n_up), ssraw.shape[2]], dtype=np.complex64)
# out = out + np.sum(ssraw, axis=1).reshape((dims[0], 1, dims[2]))
# out = out.reshape((ssraw.shape[0] * int(n_up), ssraw.shape[2]))
return out
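# Reading of the routine above (shapes inferred from the code itself): ssraw is
# (n_slow, n_blocks, n_rg) and dop is (n_blocks, n_rg); every azimuth block is
# interpolated to n_slow * n_up slow-time samples via zero-padding in the
# slow-time spectrum and modulated with exp(1j * 2*pi * dop * t) before the
# blocks are summed, so the returned array is (n_slow * n_up, n_rg).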
def skimraw(cfg_file, output_file, ocean_file, reuse_ocean_file, errors_file, reuse_errors_file,
plot_save=True):
###################
# INITIALIZATIONS #
###################
# MPI SETUP
comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
# WELCOME
if rank == 0:
print('-------------------------------------------------------------------')
print(time.strftime("- OCEANSAR SKIM RAW GENERATOR: %Y-%m-%d %H:%M:%S", time.localtime()))
# print('- Copyright (c) Gerard Marull Paretas, Paco Lopez Dekker')
print('-------------------------------------------------------------------')
# CONFIGURATION FILE
# Note: variables are 'copied' to reduce code verbosity
cfg = tpio.ConfigFile(cfg_file)
info = utils.PrInfo(cfg.sim.verbosity, "SKIM raw")
# RAW
wh_tol = cfg.srg.wh_tol
nesz = cfg.srg.nesz
use_hmtf = cfg.srg.use_hmtf
scat_spec_enable = cfg.srg.scat_spec_enable
scat_spec_mode = cfg.srg.scat_spec_mode
scat_bragg_enable = cfg.srg.scat_bragg_enable
scat_bragg_model = cfg.srg.scat_bragg_model
scat_bragg_d = cfg.srg.scat_bragg_d
scat_bragg_spec = cfg.srg.scat_bragg_spec
scat_bragg_spread = cfg.srg.scat_bragg_spread
# SAR
inc_angle = np.deg2rad(cfg.radar.inc_angle)
f0 = cfg.radar.f0
pol = cfg.radar.pol
squint_r = np.radians(90 - cfg.radar.azimuth)
if pol == 'DP':
do_hh = True
do_vv = True
elif pol == 'hh':
do_hh = True
do_vv = False
else:
do_hh = False
do_vv = True
prf = cfg.radar.prf
num_ch = int(cfg.radar.num_ch)
ant_L = cfg.radar.ant_L
alt = cfg.radar.alt
v_ground = cfg.radar.v_ground
rg_bw = cfg.radar.rg_bw
over_fs = cfg.radar.Fs / cfg.radar.rg_bw
sigma_n_tx = cfg.radar.sigma_n_tx
phase_n_tx = np.deg2rad(cfg.radar.phase_n_tx)
sigma_beta_tx = cfg.radar.sigma_beta_tx
phase_beta_tx = np.deg2rad(cfg.radar.phase_beta_tx)
sigma_n_rx = cfg.radar.sigma_n_rx
phase_n_rx = np.deg2rad(cfg.radar.phase_n_rx)
sigma_beta_rx = cfg.radar.sigma_beta_rx
phase_beta_rx = np.deg2rad(cfg.radar.phase_beta_rx)
# OCEAN / OTHERS
ocean_dt = cfg.ocean.dt
if hasattr(cfg.sim, "cal_targets"):
if cfg.sim.cal_targets is False:
add_point_target = False # This for debugging
point_target_floats = True # Not really needed, but makes coding easier later
else:
print("Adding cal targets")
add_point_target = True
if cfg.sim.cal_targets.lower() == 'floating':
point_target_floats = True
else:
point_target_floats = False
else:
add_point_target = False # This for debugging
point_target_floats = True
n_sinc_samples = 10
sinc_ovs = 20
chan_sinc_vec = raw.calc_sinc_vec(n_sinc_samples, sinc_ovs, Fs=over_fs)
    # Set wind direction with respect to beam
# I hope the following line is correct, maybe sign is wrong
wind_dir = cfg.radar.azimuth - cfg.ocean.wind_dir
# OCEAN SURFACE
if rank == 0:
print('Initializing ocean surface...')
surface_full = OceanSurface()
# Setup compute values
compute = ['D', 'Diff', 'Diff2']
if use_hmtf:
compute.append('hMTF')
# Try to reuse initialized surface
if reuse_ocean_file:
try:
surface_full.load(ocean_file, compute)
except RuntimeError:
pass
if (not reuse_ocean_file) or (not surface_full.initialized):
if hasattr(cfg.ocean, 'use_buoy_data'):
if cfg.ocean.use_buoy_data:
bdataf = cfg.ocean.buoy_data_file
date = datetime.datetime(np.int(cfg.ocean.year),
np.int(cfg.ocean.month),
np.int(cfg.ocean.day),
np.int(cfg.ocean.hour),
np.int(cfg.ocean.minute), 0)
date, bdata = tpio.load_buoydata(bdataf, date)
# FIX-ME: direction needs to consider also azimuth of beam
buoy_spec = tpio.BuoySpectra(bdata, heading=cfg.radar.heading, depth=cfg.ocean.depth)
dirspectrum_func = buoy_spec.Sk2
# Since the wind direction is included in the buoy data
wind_dir = 0
else:
dirspectrum_func = None
if cfg.ocean.swell_dir_enable:
dir_swell_spec = s_spec.ardhuin_swell_spec
else:
dir_swell_spec = None
wind_dir = np.deg2rad(wind_dir)
else:
if cfg.ocean.swell_dir_enable:
dir_swell_spec = s_spec.ardhuin_swell_spec
else:
dir_swell_spec = None
dirspectrum_func = None
wind_dir = np.deg2rad(wind_dir)
surface_full.init(cfg.ocean.Lx, cfg.ocean.Ly, cfg.ocean.dx,
cfg.ocean.dy, cfg.ocean.cutoff_wl,
cfg.ocean.spec_model, cfg.ocean.spread_model,
wind_dir,
cfg.ocean.wind_fetch, cfg.ocean.wind_U,
cfg.ocean.current_mag,
np.deg2rad(cfg.radar.azimuth - cfg.ocean.current_dir),
cfg.radar.azimuth - cfg.ocean.dir_swell_dir,
cfg.ocean.freq_r, cfg.ocean.sigf,
cfg.ocean.sigs, cfg.ocean.Hs,
cfg.ocean.swell_dir_enable,
cfg.ocean.swell_enable, cfg.ocean.swell_ampl,
np.deg2rad(cfg.radar.azimuth - cfg.ocean.swell_dir),
cfg.ocean.swell_wl,
compute, cfg.ocean.opt_res,
cfg.ocean.fft_max_prime,
choppy_enable=cfg.ocean.choppy_enable,
depth=cfg.ocean.depth,
dirspectrum_func=dirspectrum_func,
dir_swell_spec=dir_swell_spec)
surface_full.save(ocean_file)
# Now we plot the directional spectrum
# self.wave_dirspec[good_k] = dirspectrum_func(self.kx[good_k], self.ky[good_k])
plt.figure()
plt.imshow(np.fft.fftshift(surface_full.wave_dirspec),
extent=[surface_full.kx.min(), surface_full.kx.max(),
surface_full.ky.min(), surface_full.ky.max()],
origin='lower',
cmap='inferno_r')
plt.grid(True)
pltax = plt.gca()
pltax.set_xlim((-1, 1))
pltax.set_ylim((-1, 1))
Narr_length = 0.08 # np.min([surface_full.kx.max(), surface_full.ky.max()])
pltax.arrow(0, 0,
-Narr_length * np.sin(np.radians(cfg.radar.heading)),
Narr_length * np.cos(np.radians(cfg.radar.heading)),
fc="k", ec="k")
plt.xlabel('$k_x$ [rad/m]')
plt.ylabel('$k_y$ [rad/m]')
plt.colorbar()
#plt.show()
# Create plots directory
plot_path = os.path.dirname(output_file) + os.sep + 'raw_plots'
if plot_save:
if not os.path.exists(plot_path):
os.makedirs(plot_path)
plt.savefig(os.path.join(plot_path, 'input_dirspectrum.png'))
plt.close()
if cfg.ocean.swell_dir_enable:
plt.figure()
plt.imshow(np.fft.fftshift(np.abs(surface_full.swell_dirspec)),
extent=[surface_full.kx.min(), surface_full.kx.max(),
surface_full.ky.min(), surface_full.ky.max()],
origin='lower',
cmap='inferno_r')
plt.grid(True)
pltax = plt.gca()
pltax.set_xlim((-0.1, 0.1))
pltax.set_ylim((-0.1, 0.1))
Narr_length = 0.08 # np.min([surface_full.kx.max(), surface_full.ky.max()])
pltax.arrow(0, 0,
-Narr_length * np.sin(np.radians(cfg.radar.heading)),
Narr_length * np.cos(np.radians(cfg.radar.heading)),
fc="k", ec="k")
plt.xlabel('$k_x$ [rad/m]')
plt.ylabel('$k_y$ [rad/m]')
plt.colorbar()
#plt.show()
# Create plots directory
plot_path = os.path.dirname(output_file) + os.sep + 'raw_plots'
if plot_save:
if not os.path.exists(plot_path):
os.makedirs(plot_path)
plt.savefig(os.path.join(plot_path, 'input_dirspectrum_combined.png'))
plt.close()
else:
surface_full = None
# Initialize surface balancer
surface = OceanSurfaceBalancer(surface_full, ocean_dt)
# CALCULATE PARAMETERS
if rank == 0:
print('Initializing simulation parameters...')
# SR/GR/INC Matrixes
sr0 = geosar.inc_to_sr(inc_angle, alt)
gr0 = geosar.inc_to_gr(inc_angle, alt)
gr = surface.x + gr0
sr, inc, _ = geosar.gr_to_geo(gr, alt)
print(sr.dtype)
look = geosar.inc_to_look(inc, alt)
min_sr = np.min(sr)
# sr -= np.min(sr)
#inc = np.repeat(inc[np.newaxis, :], surface.Ny, axis=0)
#sr = np.repeat(sr[np.newaxis, :], surface.Ny, axis=0)
#gr = np.repeat(gr[np.newaxis, :], surface.Ny, axis=0)
#Let's try to safe some memory and some operations
inc = inc.reshape(1, inc.size)
look = look.reshape(1, inc.size)
sr = sr.reshape(1, sr.size)
gr = gr.reshape(1, gr.size)
sin_inc = np.sin(inc)
cos_inc = np.cos(inc)
# lambda, K, resolution, time, etc.
l0 = const.c/f0
k0 = 2.*np.pi*f0/const.c
sr_res = const.c/(2.*rg_bw)
sr_smp = const.c / (2 * cfg.radar.Fs)
if cfg.radar.L_total:
ant_L = ant_L/np.float(num_ch)
if v_ground == 'auto':
v_ground = geosar.orbit_to_vel(alt, ground=True)
v_orb = geosar.orbit_to_vel(alt, ground=False)
else:
v_orb = v_ground
t_step = 1./prf
az_steps = int(cfg.radar.n_pulses)
t_span = az_steps / prf
rg_samp = np.int(utils.optimize_fftsize(cfg.radar.n_rg))
#min_sr = np.mean(sr) - rg_samp / 2 * sr_smp
print(sr_smp)
max_sr = np.mean(sr) + rg_samp / 2 * sr_smp
sr_prof = (np.arange(rg_samp) - rg_samp/2) * sr_smp + np.mean(sr)
gr_prof, inc_prof, look_prof, b_prof = geosar.sr_to_geo(sr_prof, alt)
look_prof = look_prof.reshape((1, look_prof.size))
sr_prof = sr_prof.reshape((1, look_prof.size))
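    # Doppler centroid of the squinted beam: f_DC = 2 * v_orb * sin(look) * sin(squint) / lambda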
dop_ref = 2 * v_orb * np.sin(look_prof) * np.sin(squint_r) / l0
print("skim_raw: Doppler Centroid is %f Hz" % (np.mean(dop_ref)))
if cfg.srg.two_scale_Doppler:
# We will compute less surface realizations
n_pulses_b = utils.optimize_fftsize(int(cfg.srg.surface_coh_time * prf))/2
print("skim_raw: down-sampling rate =%i" % (n_pulses_b))
# n_pulses_b = 4
az_steps_ = int(np.ceil(az_steps / n_pulses_b))
t_step = t_step * n_pulses_b
# Maximum length in azimuth that we can consider to have the same geometric Doppler
dy_integ = cfg.srg.phase_err_tol / (2 * k0 * v_ground / min_sr * cfg.srg.surface_coh_time)
surface_dy = surface.y[1] - surface.y[0]
ny_integ = (dy_integ / surface.dy)
print("skim_raw: ny_integ=%f" % (ny_integ))
if ny_integ < 1:
ny_integ = 1
else:
ny_integ = int(2**np.floor(np.log2(ny_integ)))
info.msg("skim_raw: size of intermediate radar data: %f MB" % (8 * ny_integ * az_steps_ * rg_samp *1e-6),
importance=1)
info.msg("skim_raw: ny_integ=%i" % (ny_integ), importance=1)
if do_hh:
proc_raw_hh = np.zeros([az_steps_, int(surface.Ny / ny_integ), rg_samp], dtype=np.complex)
proc_raw_hh_step = np.zeros([surface.Ny, rg_samp], dtype=np.complex)
if do_vv:
proc_raw_vv = np.zeros([az_steps_, int(surface.Ny / ny_integ), rg_samp], dtype=np.complex)
proc_raw_vv_step = np.zeros([surface.Ny, rg_samp], dtype=np.complex)
# Doppler centroid
# sin(a+da) = sin(a) + cos(a)*da - 1/2*sin(a)*da**2
az = surface.y.reshape((surface.Ny, 1))
# FIX-ME: this is a coarse approximation
da = az/gr_prof
sin_az = np.sin(squint_r) + np.cos(squint_r) * da - 0.5 * np.sin(squint_r) * da**2
dop0 = 2 * v_orb * np.sin(look_prof) * sin_az / l0
# az / 2 * sin_sr _az
# az_now = (t_now - t_span / 2.) * v_ground * np.cos(squint_r)
# az = np.repeat((surface.y - az_now)[:, np.newaxis], surface.Nx, axis=1)
# az = (surface.y - az_now).reshape((surface.Ny, 1))
# print("Max az: %f" % (np.max(az)))
#dop0 = np.mean(np.reshape(dop0, (surface.Ny/ny_integ, ny_integ, rg_samp)), axis=1)
s_int = np.int(surface.Ny / ny_integ)
dop0 = np.mean(np.reshape(dop0, (s_int, np.int(ny_integ), rg_samp)), axis=1)
else:
az_steps_ = az_steps
if do_hh:
proc_raw_hh = np.zeros([az_steps, rg_samp], dtype=np.complex)
if do_vv:
proc_raw_vv = np.zeros([az_steps, rg_samp], dtype=np.complex)
t_last_rcs_bragg = -1.
last_progress = -1
NRCS_avg_vv = np.zeros(az_steps, dtype=np.float)
NRCS_avg_hh = np.zeros(az_steps, dtype=np.float)
## RCS MODELS
# Specular
if scat_spec_enable:
if scat_spec_mode == 'kodis':
rcs_spec = rcs.RCSKodis(inc, k0, surface.dx, surface.dy)
elif scat_spec_mode == 'fa' or scat_spec_mode == 'spa':
spec_ph0 = np.random.uniform(0., 2.*np.pi,
size=[surface.Ny, surface.Nx])
rcs_spec = rcs.RCSKA(scat_spec_mode, k0, surface.x, surface.y,
surface.dx, surface.dy)
else:
raise NotImplementedError('RCS mode %s for specular scattering not implemented' % scat_spec_mode)
# Bragg
if scat_bragg_enable:
phase_bragg = np.zeros([2, surface.Ny, surface.Nx])
bragg_scats = np.zeros([2, surface.Ny, surface.Nx], dtype=np.complex)
# dop_phase_p = np.random.uniform(0., 2.*np.pi, size=[surface.Ny, surface.Nx])
# dop_phase_m = np.random.uniform(0., 2.*np.pi, size=[surface.Ny, surface.Nx])
tau_c = closure.grid_coherence(cfg.ocean.wind_U,surface.dx, f0)
rndscat_p = closure.randomscat_ts(tau_c, (surface.Ny, surface.Nx), prf)
rndscat_m = closure.randomscat_ts(tau_c, (surface.Ny, surface.Nx), prf)
# NOTE: This ignores slope, may be changed
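        # k_b below is the Bragg-resonant wavenumber (2 * k0 * sin(inc)); c_b is the
        # gravity-capillary phase speed sqrt(g/k + (T/rho) * k), with T/rho ~= 0.072e-3
        # m^3/s^2, projected onto the line of sight through the sin(inc) factor.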
k_b = 2.*k0*sin_inc
c_b = sin_inc*np.sqrt(const.g/k_b + 0.072e-3*k_b)
if scat_bragg_model == 'romeiser97':
current_dir = np.deg2rad(cfg.ocean.current_dir)
current_vec = (cfg.ocean.current_mag *
np.array([np.cos(current_dir),
np.sin(current_dir)]))
U_dir = np.deg2rad(cfg.ocean.wind_dir)
U_vec = (cfg.ocean.wind_U *
np.array([np.cos(U_dir), np.sin(U_dir)]))
U_eff_vec = U_vec - current_vec
rcs_bragg = rcs.RCSRomeiser97(k0, inc, pol,
surface.dx, surface.dy,
linalg.norm(U_eff_vec),
np.arctan2(U_eff_vec[1],
U_eff_vec[0]),
surface.wind_fetch,
scat_bragg_spec, scat_bragg_spread,
scat_bragg_d)
else:
raise NotImplementedError('RCS model %s for Bragg scattering not implemented' % scat_bragg_model)
surface_area = surface.dx * surface.dy * surface.Nx * surface.Ny
###################
# SIMULATION LOOP #
###################
if rank == 0:
print('Computing profiles...')
for az_step in np.arange(az_steps_, dtype=np.int):
# AZIMUTH & SURFACE UPDATE
t_now = az_step * t_step
az_now = (t_now - t_span/2.)*v_ground * np.cos(squint_r)
# az = np.repeat((surface.y - az_now)[:, np.newaxis], surface.Nx, axis=1)
az = (surface.y - az_now).reshape((surface.Ny, 1))
surface.t = t_now
if az_step == 0:
# Check wave-height
info.msg("Standard deviation of wave-height (peak-to-peak; i.e. x2): %f" % (2 * np.std(surface.Dz)))
#if az_step == 0:
# print("Max Dx: %f" % (np.max(surface.Dx)))
# print("Max Dy: %f" % (np.max(surface.Dy)))
# print("Max Dz: %f" % (np.max(surface.Dz)))
# print("Max Diffx: %f" % (np.max(surface.Diffx)))
# print("Max Diffy: %f" % (np.max(surface.Diffy)))
# print("Max Diffxx: %f" % (np.max(surface.Diffxx)))
# print("Max Diffyy: %f" % (np.max(surface.Diffyy)))
# print("Max Diffxy: %f" % (np.max(surface.Diffxy)))
# COMPUTE RCS FOR EACH MODEL
# Note: SAR processing is range independent as slant range is fixed
sin_az = az / sr
az_proj_angle = np.arcsin(az / gr0)
# Note: Projected displacements are added to slant range
if point_target_floats is False: # This can only happen if point targets are enabled
surface.Dx[int(surface.Ny / 2), int(surface.Nx / 2)] = 0
surface.Dy[int(surface.Ny / 2), int(surface.Nx / 2)] = 0
surface.Dz[int(surface.Ny / 2), int(surface.Nx / 2)] = 0
if cfg.srg.two_scale_Doppler:
# slant-range for phase
sr_surface = (sr - cos_inc * surface.Dz
+ surface.Dx * sin_inc + surface.Dy * sin_az)
if cfg.srg.rcm:
# add non common rcm
sr_surface4rcm = sr_surface + az / 2 * sin_az
else:
sr_surface4rcm = sr_surface
else:
# FIXME: check if global shift is included, in case we care about slow simulations
# slant-range for phase and Doppler
sr_surface = (sr - cos_inc*surface.Dz + az/2*sin_az
+ surface.Dx*sin_inc + surface.Dy*sin_az)
sr_surface4rcm = sr_surface
if do_hh:
scene_hh = np.zeros([int(surface.Ny), int(surface.Nx)], dtype=np.complex)
if do_vv:
scene_vv = np.zeros([int(surface.Ny), int(surface.Nx)], dtype=np.complex)
# Specular
if scat_spec_enable:
if scat_spec_mode == 'kodis':
Esn_sp = np.sqrt(4.*np.pi)*rcs_spec.field(az_proj_angle, sr_surface,
surface.Diffx, surface.Diffy,
surface.Diffxx, surface.Diffyy, surface.Diffxy)
if do_hh:
scene_hh += Esn_sp
if do_vv:
scene_vv += Esn_sp
else:
# FIXME
if do_hh:
pol_tmp = 'hh'
Esn_sp = (np.exp(-1j*(2.*k0*sr_surface)) * (4.*np.pi)**1.5 *
rcs_spec.field(1, 1, pol_tmp[0], pol_tmp[1],
inc, inc,
az_proj_angle, az_proj_angle + np.pi,
surface.Dz,
surface.Diffx, surface.Diffy,
surface.Diffxx,
surface.Diffyy,
surface.Diffxy))
scene_hh += Esn_sp
if do_vv:
pol_tmp = 'vv'
Esn_sp = (np.exp(-1j*(2.*k0*sr_surface)) * (4.*np.pi)**1.5 *
rcs_spec.field(1, 1, pol_tmp[0], pol_tmp[1],
inc, inc,
az_proj_angle, az_proj_angle + np.pi,
surface.Dz,
surface.Diffx, surface.Diffy,
surface.Diffxx,
surface.Diffyy,
surface.Diffxy))
scene_vv += Esn_sp
NRCS_avg_hh[az_step] += (np.sum(np.abs(Esn_sp)**2) / surface_area)
NRCS_avg_vv[az_step] += NRCS_avg_hh[az_step]
# Bragg
if scat_bragg_enable:
if (t_now - t_last_rcs_bragg) > ocean_dt:
if scat_bragg_model == 'romeiser97':
if pol == 'DP':
RCS_bragg_hh, RCS_bragg_vv = rcs_bragg.rcs(az_proj_angle,
surface.Diffx,
surface.Diffy)
elif pol=='hh':
RCS_bragg_hh = rcs_bragg.rcs(az_proj_angle,
surface.Diffx,
surface.Diffy)
else:
RCS_bragg_vv = rcs_bragg.rcs(az_proj_angle,
surface.Diffx,
surface.Diffy)
if use_hmtf:
# Fix Bad MTF points
surface.hMTF[np.where(surface.hMTF < -1)] = -1
if do_hh:
RCS_bragg_hh[0] *= (1 + surface.hMTF)
RCS_bragg_hh[1] *= (1 + surface.hMTF)
if do_vv:
RCS_bragg_vv[0] *= (1 + surface.hMTF)
RCS_bragg_vv[1] *= (1 + surface.hMTF)
t_last_rcs_bragg = t_now
if do_hh:
scat_bragg_hh = np.sqrt(RCS_bragg_hh)
NRCS_bragg_hh_instant_avg = np.sum(RCS_bragg_hh) / surface_area
NRCS_avg_hh[az_step] += NRCS_bragg_hh_instant_avg
if do_vv:
scat_bragg_vv = np.sqrt(RCS_bragg_vv)
NRCS_bragg_vv_instant_avg = np.sum(RCS_bragg_vv) / surface_area
NRCS_avg_vv[az_step] += NRCS_bragg_vv_instant_avg
# Doppler phases (Note: Bragg radial velocity taken constant!)
surf_phase = - (2 * k0) * sr_surface
cap_phase = (2 * k0) * t_step * c_b * (az_step + 1)
phase_bragg[0] = surf_phase - cap_phase # + dop_phase_p
phase_bragg[1] = surf_phase + cap_phase # + dop_phase_m
bragg_scats[0] = rndscat_m.scats(t_now)
bragg_scats[1] = rndscat_p.scats(t_now)
if do_hh:
scene_hh += ne.evaluate('sum(scat_bragg_hh * exp(1j*phase_bragg) * bragg_scats, axis=0)')
if do_vv:
scene_vv += ne.evaluate('sum(scat_bragg_vv * exp(1j*phase_bragg) * bragg_scats, axis=0)')
if add_point_target:
# Now we replace scattering at center by fixed value
pt_y = int(surface.Ny / 2)
pt_x = int(surface.Nx / 2)
if do_hh:
scene_hh[pt_y, pt_x] = 1000 * np.exp(-1j * 2 * k0 * sr_surface[pt_y, pt_x])
if do_vv:
scene_vv[pt_y, pt_x] = 1000 * np.exp(-1j * 2 * k0 * sr_surface[pt_y, pt_x])
## ANTENNA PATTERN
        ## FIXME: this assumes co-located Tx and Rx, so it will not work for true bistatic configurations
if cfg.radar.L_total:
beam_pattern = sinc_1tx_nrx(sin_az, ant_L * num_ch, f0, num_ch, field=True)
else:
beam_pattern = sinc_1tx_nrx(sin_az, ant_L, f0, 1, field=True)
        # GENERATE CHANNEL PROFILES
if cfg.srg.two_scale_Doppler:
sr_surface_ = sr_surface4rcm
if do_hh:
proc_raw_hh_step[:, :] = 0
proc_raw_hh_ = proc_raw_hh_step
scene_bp_hh = scene_hh * beam_pattern
if do_vv:
proc_raw_vv_step[:, :] = 0
proc_raw_vv_ = proc_raw_vv_step
scene_bp_vv = scene_vv * beam_pattern
else:
sr_surface_ = sr_surface4rcm.flatten()
if do_hh:
proc_raw_hh_ = proc_raw_hh[az_step]
scene_bp_hh = (scene_hh * beam_pattern).flatten()
if do_vv:
proc_raw_vv_ = proc_raw_vv[az_step]
scene_bp_vv = (scene_vv * beam_pattern).flatten()
if do_hh:
raw.chan_profile_numba(sr_surface_,
scene_bp_hh,
sr_smp,
sr_prof.min(),
chan_sinc_vec,
n_sinc_samples, sinc_ovs,
proc_raw_hh_,
rg_only=cfg.srg.two_scale_Doppler)
if do_vv:
raw.chan_profile_numba(sr_surface_,
scene_bp_vv,
sr_smp,
sr_prof.min(),
chan_sinc_vec,
n_sinc_samples, sinc_ovs,
proc_raw_vv_,
rg_only=cfg.srg.two_scale_Doppler)
if cfg.srg.two_scale_Doppler:
#Integrate in azimuth
s_int = np.int(surface.Ny/ny_integ)
if do_hh:
proc_raw_hh[az_step] = np.sum(np.reshape(proc_raw_hh_,
(s_int, ny_integ, rg_samp)), axis=1)
info.msg("Max abs(HH): %f" % np.max(np.abs(proc_raw_hh[az_step])), importance=1)
if do_vv:
#print(proc_raw_vv.shape)
proc_raw_vv[az_step] = np.sum(np.reshape(proc_raw_vv_,
(s_int, ny_integ, rg_samp)), axis=1)
info.msg("Max abs(VV): %f" % np.max(np.abs(proc_raw_vv[az_step])), importance=1)
# SHOW PROGRESS (%)
current_progress = np.int((100*az_step)/az_steps_)
if current_progress != last_progress:
last_progress = current_progress
info.msg('SP,%d,%d,%d%%' % (rank, size, current_progress), importance=1)
if cfg.srg.two_scale_Doppler:
        # Now we have to up-sample and add Doppler
info.msg("skim_raw: Dopplerizing and upsampling")
print(dop0.max())
print(n_pulses_b)
print(prf)
if do_hh:
proc_raw_hh = upsample_and_dopplerize(proc_raw_hh, dop0, n_pulses_b, prf)
if do_vv:
proc_raw_vv = upsample_and_dopplerize(proc_raw_vv, dop0, n_pulses_b, prf)
# MERGE RESULTS
if do_hh:
total_raw_hh = np.empty_like(proc_raw_hh) if rank == 0 else None
comm.Reduce(proc_raw_hh, total_raw_hh, op=MPI.SUM, root=0)
if do_vv:
total_raw_vv = np.empty_like(proc_raw_vv) if rank == 0 else None
comm.Reduce(proc_raw_vv, total_raw_vv, op=MPI.SUM, root=0)
## PROCESS REDUCED RAW DATA & SAVE (ROOT)
if rank == 0:
info.msg('calibrating and saving results...')
# Filter and decimate
#range_filter = np.ones_like(total_raw)
#range_filter[:, :, rg_samp/(2*2*cfg.radar.over_fs):-rg_samp/(2*2*cfg.radar.over_fs)] = 0
#total_raw = np.fft.ifft(range_filter*np.fft.fft(total_raw))
if do_hh:
total_raw_hh = total_raw_hh[:, :cfg.radar.n_rg]
if do_vv:
total_raw_vv = total_raw_vv[:, :cfg.radar.n_rg]
# Calibration factor (projected antenna pattern integrated in azimuth)
az_axis = np.arange(-t_span/2.*v_ground, t_span/2.*v_ground, sr0*const.c/(np.pi*f0*ant_L*10.))
if cfg.radar.L_total:
pattern = sinc_1tx_nrx(az_axis/sr0, ant_L * num_ch, f0,
num_ch, field=True)
else:
pattern = sinc_1tx_nrx(az_axis/sr0, ant_L, f0, 1,
field=True)
cal_factor = (1. / np.sqrt(np.trapz(np.abs(pattern)**2., az_axis) *
sr_res/np.sin(inc_angle)))
if do_hh:
noise = (utils.db2lin(nesz, amplitude=True) / np.sqrt(2.) *
(np.random.normal(size=total_raw_hh.shape) +
1j*np.random.normal(size=total_raw_hh.shape)))
total_raw_hh = total_raw_hh * cal_factor + noise
if do_vv:
noise = (utils.db2lin(nesz, amplitude=True) / np.sqrt(2.) *
(np.random.normal(size=total_raw_vv.shape) +
1j*np.random.normal(size=total_raw_vv.shape)))
total_raw_vv = total_raw_vv * cal_factor + noise
# Add slow-time error
# if use_errors:
# if do_hh:
# total_raw_hh *= errors.beta_noise
# if do_vv:
# total_raw_vv *= errors.beta_noise
# Save RAW data
if do_hh and do_vv:
rshp = (1,) + total_raw_hh.shape
total_raw = np.concatenate((total_raw_hh.reshape(rshp),
total_raw_vv.reshape(rshp)))
rshp = (1,) + NRCS_avg_hh.shape
NRCS_avg = np.concatenate((NRCS_avg_hh.reshape(rshp),
NRCS_avg_vv.reshape(rshp)))
elif do_hh:
rshp = (1,) + total_raw_hh.shape
total_raw = total_raw_hh.reshape(rshp)
rshp = (1,) + NRCS_avg_hh.shape
NRCS_avg = NRCS_avg_hh.reshape(rshp)
else:
rshp = (1,) + total_raw_vv.shape
total_raw = total_raw_vv.reshape(rshp)
rshp = (1,) + NRCS_avg_vv.shape
NRCS_avg = NRCS_avg_vv.reshape(rshp)
raw_file = tpio.SkimRawFile(output_file, 'w', total_raw.shape)
raw_file.set('inc_angle', np.rad2deg(inc_angle))
raw_file.set('f0', f0)
# raw_file.set('num_ch', num_ch)
raw_file.set('ant_L', ant_L)
raw_file.set('prf', prf)
raw_file.set('v_ground', v_ground)
raw_file.set('orbit_alt', alt)
raw_file.set('sr0', sr0)
raw_file.set('rg_sampling', rg_bw*over_fs)
raw_file.set('rg_bw', rg_bw)
raw_file.set('raw_data*', total_raw)
raw_file.set('NRCS_avg', NRCS_avg)
raw_file.set('azimuth', cfg.radar.azimuth)
raw_file.set('dop_ref', dop_ref)
raw_file.close()
print(time.strftime("Finished [%Y-%m-%d %H:%M:%S]", time.localtime()))
if __name__ == '__main__':
# INPUT ARGUMENTS
# parser = argparse.ArgumentParser()
# parser.add_argument('-c', '--cfg_file')
# parser.add_argument('-o', '--output_file')
# parser.add_argument('-oc', '--ocean_file')
# parser.add_argument('-ro', '--reuse_ocean_file', action='store_true')
# parser.add_argument('-er', '--errors_file', type=str, default=None)
# parser.add_argument('-re', '--reuse_errors_file', action='store_true')
# args = parser.parse_args()
#
# skimraw(args.cfg_file, args.output_file,
# args.ocean_file, args.reuse_ocean_file,
# args.errors_file, args.reuse_errors_file)
##
skimraw(r"D:\research\TU Delft\Data\OceanSAR\SKIM_proxy_new.cfg",
r"D:\research\TU Delft\Data\OceanSAR\out1.nc",
r"D:\research\TU Delft\Data\OceanSAR\out2.nc",
False,
False,
False)
| gpl-3.0 |
ericdill/chxtools | chxtools/plot.py | 3 | 5392 | import numpy as np
import subprocess
from dataportal import DataBroker, DataMuxer
from dataportal.broker import EventQueue
import matplotlib.pyplot as plt
import time as ttime
import sys
from ophyd.userapi.scan_api import estimate
def new_queue(header, queue=None):
if queue is None:
queue = EventQueue(header)
return header, queue
hdr = DataBroker[-1]
if header.scan_id != hdr.scan_id:
print("New header found: Scan id = %s. uid = %s" %
(hdr.scan_id, hdr.run_start_uid))
sys.stdout.flush()
queue = EventQueue(hdr)
return hdr, queue
return header, queue
vlines = {'center_of_mass': {'color': 'red'},
'cen': {'color': 'red', 'ls': '--'},}
hlines = {'avgy': {'color': 'blue', 'ls': '-'},
'ymin': {'color': 'black', 'ls': '--'},
'ymax': {'color': 'black', 'ls': '--'}, }
points = {'cen': {'color': 'red', 'marker': 'o'},
'fwmh_left': {'color': 'red', 'marker': '<'},
'fwhm_right': {'color': 'red', 'marker': '>'}}
def plot1d(y, x=None, scans=None, live=True, sleep_time=1):
"""Plot live data and on-the-fly peak stats estimator
Parameters
----------
y : str
The name of the y value to plot
x : str, optional
The name of the value to plot on the x axis. If None, defaults
to the sequence number of the event (Note that this probably works,
but I'm not sure as it has not been tested!)
scans : list, optional
List of other scan indices to plot. uses db[] syntax, so any valid
entry to [] will work
live : bool, optional
Grab new data and plot it as it comes off. Defaults to True.
sleep_time : float, optional
Time to sleep between data updates. Defaults to 1 sec
"""
if scans is None:
scans = []
lines1 = {}
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(15,10), sharex=True)
fig.show()
for scan_id in scans:
hdr = DataBroker[scan_id]
events = DataBroker.fetch_events(hdr)
dm = DataMuxer.from_events(events)
df = dm.to_sparse_dataframe()
if x is None:
old_x = np.asarray(df.index)
else:
old_x = np.asarray(df[x])
old_y = np.asarray(df[y])
lines1[scan_id], = ax1.plot(old_x, old_y, 'o', ms=15, label=scan_id)
if x is None:
ax1.set_xlabel('scan point index')
ax2.set_xlabel('scan point index')
else:
ax1.set_xlabel(x)
ax2.set_xlabel(x)
ax1.set_ylabel(y)
ax2.set_ylabel(y)
ax1.set_title('data stream')
ax2.set_title('peak estimator')
if live:
hdr = DataBroker[-1]
scan_id = hdr.scan_id
while scan_id in lines1:
ttime.sleep(.5)
hdr = DataBroker[-1]
scan_id = hdr.scan_id
lines1[scan_id], = ax1.plot([], [], 'o', ms=15, label=scan_id)
queue = None
prev_stats = None
while True:
# loop until killed
hdr, queue = new_queue(hdr, queue)
scan_id = hdr.scan_id
queue.update()
new_events = queue.get()
try:
old_x, old_y = lines1[scan_id].get_data()
old_x = list(old_x)
old_y = list(old_y)
except KeyError:
lines1[scan_id], = ax1.plot([], [], 'o', ms=15, label=scan_id)
old_x, old_y = [], []
if x is None:
            new_x = [ev['seq_num'] for ev in new_events]
else:
new_x = [ev['data'][x] for ev in new_events]
new_y = [ev['data'][y] for ev in new_events]
new_x = old_x + new_x
new_y = old_y + new_y
lines1[scan_id].set_data(new_x, new_y)
ax1.relim(visible_only=True)
ax1.legend(loc=0).draggable()
# now deal with axis 2
try:
stats = estimate(np.asarray(new_x), np.asarray(new_y))
except ValueError:
stats = prev_stats
# print(stats)
if stats != prev_stats:
ax2.cla()
ax2.plot(new_x, new_y, 'o', ms=15, label=scan_id)
ax2.set_title('peak estimator')
for stat, vals in stats.items():
if stat in points:
# sometimes 'cen' comes back as one or two values. This
# try/except block is a way to do the right thing when
# this happens
try:
vals[0]
ax2.scatter(vals[0], vals[1], label=stat, **points[stat])
except IndexError:
ax2.axvline(vals, label=stat, **vlines[stat])
elif stat in hlines:
# draw a horizontal line
ax2.axhline(vals, label=stat, **hlines[stat])
elif stat in vlines:
# draw a vertical line
ax2.axvline(vals, label=stat, **vlines[stat])
prev_stats = stats
ax2.relim(visible_only=True)
ax2.legend(loc=0).draggable()
fig.canvas.draw()
fig.canvas.flush_events()
ttime.sleep(sleep_time)
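# Hedged usage sketch (the detector and motor names below are hypothetical and
# not guaranteed to exist in any given header):
#     plot1d('sclr_ch2', x='theta', scans=[-2, -3], live=True, sleep_time=2)
# overlays the two previous scans, then keeps appending points from the newest
# header while the lower panel re-runs the peak estimator on every update.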
| bsd-3-clause |
fracturica/shardlib | shardlib/comp_analysis/compAnalysisBase.py | 1 | 9410 | import matplotlib as mpl
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import dataProcessing as dp
import plotFuncs as pf
import sessionFuncs as sf
from types import *
import copy
class CompAnalysisBase(object):
def __init__(
self,
pNode,
analysisBranches,
criteria,
sifs,
errType='difference'):
self.pNode = pNode
self.crit = criteria
self.sifs = sifs
self.analysisBranches = analysisBranches
self.errType = errType
def runMethods(self):
self.setAnalysisNodes()
self.createDataStr()
self.createSelectionDict()
self.createDataDict()
def setAnalysisNodes(self):
self.aNodes = []
for a in self.analysisBranches:
if isinstance(a, (list, tuple)):
self.aNodes.append(self.pNode.getTreeBranch(a))
elif isinstance(a, (NoneType,)):
pass
else:
raise KeyError(
'Invalid value for the analysisBranches argument')
def getSubplotTitle(self, node, depth=3):
title = []
n = copy.copy(node)
if node.getNodeLevelInTree() < depth:
raise IndexError(
'Node level in tree is lower than the depth value.\
Try lower depth value.')
else:
for i in range(depth):
title.append(n.getName())
n = n.getParent()
return ' '.join(title[::-1])
def createDataStr(self):
self.dataStr = []
self.count = 0
for node in self.aNodes:
self.createDataStrEntry(node)
def getOptSimKey(self, dataDict, simIds):
optSim = [pf.getSimIdsWithLowestErrorPerDH(
simIds, self.crit[0], self.crit[1]).values()[0][0]]
optSimKey = [key for key in dataDict.keys() if dataDict[
key] == optSim][0]
return optSimKey
def createSelectionDict(self):
self.sData = []
for dd in self.dataStr:
self.sData = self.sData + dd[0].items()
self.sData = dict(self.sData)
def createDataDict(self):
self.dataDicts = []
for entry in self.dataStr:
data = self.createDataDictEntry(entry[0])
self.dataDicts.append(data)
def createDataDictEntry(self, dataStrEntry):
data = {s: {} for s in self.sifs}
for key in dataStrEntry.keys():
errs = self.getSimIdErrors(dataStrEntry[key])
for sif in self.sifs:
data[sif][key] = errs[sif]
return data
def getSimIdErrors(self, simKey):
if simKey == []:
return {s: [] for s in self.sifs}
elif isinstance(simKey, list) and len(simKey) == 1:
ad = dp.AnalysisData(simKey[0])
ad.calcAnSol()
ad.calculateStats()
errs = ad. getErrorReports()[self.errType]
return {sif: errs[sif] for sif in self.sifs}
# figure
def createFigure(self, fig):
self.syc = -0.33
self.fig = fig
self.axes = []
self.createFigureAxes()
i = 0
for sif in self.sifs:
for dDict, dStr in zip(self.dataDicts, self.dataStr):
self.createSubplotBoxplot(
self.axes[i], dDict, sif)
optBox = self.markOptSim(self.axes[i], dStr, dDict, sif)
errBox = self.markFailedSim(self.axes[i], dDict, sif)
i += 1
self.markBoxes = [optBox, errBox]
self.createLegend()
self.setYlimits()
self.setSubplotTitles()
self.setXlabels()
def createFigureAxes(self):
lboxcols = len(self.dataStr[0][0].keys())
rboxcols = (len(self.dataStr[1][0].keys())
if len(self.dataStr) == 2 else 0)
ncols = len(self.dataStr)
nrows = len(self.sifs)
if lboxcols == 0 or rboxcols == 0:
width_ratios = None
else:
width_ratios = [lboxcols, rboxcols]
gs = mpl.gridspec.GridSpec(
nrows=nrows, ncols=ncols,
width_ratios=width_ratios)
gs.update(wspace=0.04, hspace=0.08)
for i in range(nrows * ncols):
self.axes.append(self.fig.add_subplot(gs[i]))
self.axes[i].grid(True)
if i % 2 == 1 and ncols == 2:
self.axes[i].set_yticklabels([])
def createSubplotBoxplot(self, axis, dataDict, sif):
bpdata, pos, altpos = self.createBoxPlotData(dataDict[sif])
axis.boxplot(bpdata, widths=0.8, positions=pos)
axis.set_xlim(-0.5, len(pos + altpos) - 0.5)
axis.set_xticks(range(len(pos + altpos)))
axis.set_xticklabels(sorted(dataDict[sif].keys()))
def createBoxPlotData(self, dataDict):
data, pos, altpos = [], [], []
count = 0
for k in sorted(dataDict.keys()):
if dataDict[k] != []:
pos.append(count)
data.append(dataDict[k])
else:
altpos.append(count)
count += 1
return data, pos, altpos
def markOptSim(self, axes, dStr, dDict, sif):
optSimKey = dStr[1]
ob = None
bpdata, pos, altpos = self.createBoxPlotData(dDict[sif])
pos = sorted(pos + altpos)
i = sorted(dDict[sif].keys()).index(optSimKey)
ob = axes.axvspan(pos[i] - 0.45, pos[i] + 0.45, color='DarkGreen',
ec='lime', alpha=0.2)
return ob
def markFailedSim(self, axes, dDict, sif):
eb = None
bpdata, pos, altpos = self.createBoxPlotData(dDict[sif])
for p in altpos:
eb = axes.axvspan(p - 0.45, p + 0.45, color='DarkRed',
ec='red', alpha=0.2)
return eb
def createLegend(self):
i = len(self.dataStr) - 1
text = [
'optimal simulation',
'failed simulation',
'optSim for rightplot']
labels, handles = [], []
for i in range(len(self.markBoxes)):
if self.markBoxes[i] is not None:
labels.append(text[i])
handles.append(self.markBoxes[i])
if handles:
self.axes[i].legend(handles, labels, bbox_to_anchor=(1.02, 1),
loc=2, borderaxespad=0)
def setXlabels(self):
for i in range(len(self.dataStr)):
self.axes[-1 - i].set_xlabel(self.dataStr[::-1][i][2])
def setYlimits(self):
if len(self.dataStr) == 2:
for i in range(0, len(self.axes), 2):
ylims = list(self.axes[i].get_ylim()) + \
list(self.axes[i + 1].get_ylim())
self.axes[i].set_ylim(min(ylims), max(ylims))
self.axes[i + 1].set_ylim(min(ylims), max(ylims))
def setSubplotTitles(self):
for i in range(len(self.dataStr)):
title = self.dataStr[i][3]
self.axes[i].text(0.5, 1.1, title,
horizontalalignment='center',
fontsize=12,
transform=self.axes[i].transAxes)
def addSelectionAxisToAxes(self, axes, labels, color='g'):
labels = [0] + labels
tax = axes.twiny()
tax.set_xlim(axes.get_xlim())
tax.spines['top'].set_position(('axes', self.syc))
tax.set_xlabel('Selection ID Number')
tax.xaxis.set_major_locator(MultipleLocator(1))
tax.xaxis.set_tick_params(direction='out')
tax.xaxis.label.set_color(color)
tax.tick_params(axis='x', colors=color)
tax.set_xticklabels(labels)
x1 = axes.get_xlim()[0] + 0.5
x2 = axes.get_xlim()[1] - 0.5
x1 = self.convDataPointToAxesCoord(tax, (x1, 0))[0]
x2 = self.convDataPointToAxesCoord(tax, (x2, 0))[0]
l = mpl.lines.Line2D([x1, x2], [self.syc, self.syc],
transform=tax.transAxes, axes=tax, color=color,
visible=True, zorder=3, clip_on=False)
tax.add_line(l)
def convDataPointToAxesCoord(self, axis, point):
"""
Converts data point coordinates to axes coordinates
"""
x_d, y_d = axis.transData.transform(point)
xy = axis.transAxes.inverted().transform((x_d, y_d))
return xy
def convDataPointToFigCoord(self, axis, point):
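        # Converts data point coordinates to figure coordinates (figure analogue
        # of convDataPointToAxesCoord above).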
x_d, y_d = axis.transData.transform(point)
xy = self.fig.transFigure.inverted().transform((x_d, y_d))
return xy
def addToQueue(self, cq, selIdNum):
if isinstance(selIdNum, (NoneType,)):
pass
elif selIdNum in self.sData.keys():
simId = self.sData[selIdNum]
cq.addSimId(simId)
else:
print 'Verify the selIdNum argument value'
# assignment
def assignSimIdAsFailed(self, trees, cq, selIdNum):
if isinstance(selIdNum, (NoneType,)):
pass
elif selIdNum in self.sData.keys():
simId = self.sData[selIdNum]
cq.removeSimIdFromQueue(simId)
sf.writeToShelve(simId, 'failed')
sf.setSimIdAsFailed(trees, [simId])
del self.sData[selIdNum]
print simId, 'simId'
else:
print 'Verify the selIdNum argument value and try again'
| mit |
reimandlab/Visualistion-Framework-for-Genome-Mutations | website/imports/sites/site_importer.py | 1 | 10011 | import warnings
from abc import abstractmethod
from itertools import chain
from typing import List, Set
from warnings import warn
from collections import Counter
from numpy import nan
from pandas import DataFrame, Series
from sqlalchemy.orm import load_only, joinedload
from tqdm import tqdm
from database import db, get_or_create, create_key_model_dict
from imports import BioImporter, protein_data as importers
# those should be moved somewhere else
from imports.protein_data import get_preferred_gene_isoform
from imports.sites.site_mapper import SiteMapper
from models import KinaseGroup, Kinase, Protein, Site, SiteType, BioModel, SiteSource, Gene
def show_warning(message, category, filename, lineno, file=None, line=None):
print(message)
warnings.showwarning = show_warning
def get_or_create_kinases(chosen_kinases_names, known_kinases, known_kinase_groups) -> [Set[Kinase], Set[KinaseGroup]]:
"""Create a subset of known kinases and known kinase groups based on given
list of kinases names ('chosen_kinases_names'). If no kinase or kinase group
of given name is known, it will be created.
Returns a tuple of sets:
kinases, groups
"""
kinases, groups = set(), set()
for name in set(chosen_kinases_names):
# handle kinases group
if name.endswith('_GROUP'):
name = name[:-6]
key = name.lower()
if key not in known_kinase_groups:
known_kinase_groups[key] = KinaseGroup(name=name)
groups.add(known_kinase_groups[key])
# if it's not a group, it surely is a kinase:
else:
key = name.lower()
if key not in known_kinases:
known_kinases[key] = Kinase(
name=name,
protein=get_preferred_gene_isoform(name)
)
kinases.add(known_kinases[key])
return kinases, groups
class SiteImporter(BioImporter):
requires = {importers.kinase_mappings, importers.proteins_and_genes}
# used for cross-isoform mapping
sequence_offset = 7
@property
@abstractmethod
def source_name(self) -> str:
"""A name of the source used to import sites"""
@property
@abstractmethod
def site_types(self) -> Set[str]:
"""List of SiteTypes to be created (or re-used by the importer)"""
def __init__(self):
print(f'Preparing {self.source_name} sites importer...')
self.issues_counter = Counter()
# caching proteins and kinases allows for much faster
# import later on, though it takes some time to cache
self.known_kinases = create_key_model_dict(Kinase, 'name', lowercase=True)
self.known_groups = create_key_model_dict(KinaseGroup, 'name', lowercase=True)
self.known_sites = create_key_model_dict(
Site, ['protein_id', 'position', 'residue'],
options=(
joinedload(Site.sources).joinedload('*')
)
)
self.proteins = create_key_model_dict(
Protein, 'refseq',
options=(
load_only('refseq', 'sequence', 'id')
.joinedload(Protein.gene)
.joinedload(Gene.isoforms)
.load_only('refseq')
)
)
# create site types
site_type_objects = [
get_or_create(SiteType, name=name)
for name in set(self.site_types)
]
self.novel_site_types = [
site_type for site_type, new in site_type_objects if new
]
self.site_types_map = {
site_type.name: site_type
for site_type, new in site_type_objects
}
self.source, _ = get_or_create(SiteSource, name=self.source_name)
print(f'{self.source_name} importer ready.')
def load(self, *args, **kwargs) -> List[BioModel]:
"""Return a list of sites and site types to be added to the database"""
self.issues_counter.clear()
print('Loading protein sites:')
objects = self.load_sites(*args, **kwargs) + self.novel_site_types + [self.source]
for issue, count in self.issues_counter.items():
print(f'Encountered {count} issues: "{issue}".')
return objects
@abstractmethod
def load_sites(self, *args, **kwargs) -> List[Site]:
"""Return a list of sites to be added to the database"""
def get_sequence_of_protein(self, site) -> str:
protein = Protein.query.filter_by(refseq=site.refseq).one()
return protein.sequence
def extract_site_surrounding_sequence(self, site) -> str:
"""Creates a pattern for site mapping using:
- ^ to indicate a site that is closer than 7 aa to N-terminal end (left end)
- $ to indicate a site that is closer than 7 aa to C-terminal end (right end)
- sequence of the protein retrieved with `get_sequence_of_protein` method,
limited to +/- 7 aa from the position of the site (determined by site.position).
The offset (default 7) can be adjusted changing `sequence_offset` class variable.
"""
protein_sequence = self.get_sequence_of_protein(site)
if not protein_sequence:
self.issues_counter['no sequence'] += 1
return nan
offset = self.sequence_offset
pos = site.position - 1
if pos < 0 or pos > len(protein_sequence):
self.issues_counter['site outside of sequence'] += 1
warn(
f'The site: {self.repr_site(site)} is outside of the protein'
f' sequence (which is {len(protein_sequence)} long)'
)
return nan
if protein_sequence[pos] != site.residue:
self.issues_counter['sequence mismatch'] += 1
warn(
f'Protein sequence at {pos} ({protein_sequence[pos]})'
f' differs from {site.residue} for: {self.repr_site(site)}.'
)
return nan
left = pos - offset
right = pos + offset + 1
if left < 0:
left = 0
prefix = '^'
else:
prefix = ''
if right > len(protein_sequence):
return prefix + protein_sequence[left:] + '$'
else:
return prefix + protein_sequence[left:right]
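        # Worked example (mirrors the determine_left_offset docstring below): a site
        # 3R in the short sequence 'MARSTS' gives pos=2, so left=-5 -> 0 with a '^'
        # prefix and right=10 > len(sequence), yielding the pattern '^MARSTS$'.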
def determine_left_offset(self, site) -> int:
"""Return 0-based offset of the site position in extracted sequence fragment
Example:
having site 3R and sequence='MARSTS',
the left offset is 2, as sequence[2] == 'R'
"""
return min(site.position - 1, self.sequence_offset)
def map_sites_to_isoforms(self, sites: DataFrame) -> DataFrame:
if sites.empty:
return sites
# additional "sequence" column is needed to map the site across isoforms
sequences = sites.apply(self.extract_site_surrounding_sequence, axis=1)
offsets = sites.apply(self.determine_left_offset, axis=1)
sites = sites.assign(
sequence=Series(sequences).values,
left_sequence_offset=Series(offsets).values
)
old_len = len(sites)
sites.dropna(axis=0, inplace=True, subset=['sequence', 'residue'])
diff = old_len - len(sites)
print(f'Dropped {diff} ({diff/old_len * 100}%) sites due to lack of sequence or residue')
# nothing to map
if sites.empty:
return sites
mapper = SiteMapper(self.proteins, self.repr_site)
# sites loaded so far were explicitly defined in data files
mapped_sites = mapper.map_sites_by_sequence(sites)
# from now, only sites which really appear in isoform sequences
# in our database will be considered
# forget about the sequence column (no longer need)
mapped_sites.drop(columns=['sequence', 'left_sequence_offset'], inplace=True, errors='ignore')
return mapped_sites
def add_site(self, refseq, position: int, residue, mod_type, pubmed_ids=None, kinases=None):
protein = self.proteins[refseq]
site_key = (protein.id, position, residue)
site_type = self.site_types_map[mod_type]
if site_key in self.known_sites:
site = self.known_sites[site_key]
created = False
else:
site = Site(
position=position,
residue=residue,
protein_id=protein.id
)
self.known_sites[site_key] = site
created = True
site.types.add(self.site_types_map[mod_type])
site.sources.add(self.source)
if pubmed_ids:
site.pmid.update(pubmed_ids)
if kinases:
site_kinases, site_kinase_groups = get_or_create_kinases(
kinases,
self.known_kinases,
self.known_groups
)
site.kinases.update(site_kinases)
site.kinase_groups.update(site_kinase_groups)
for kinase_or_group in chain(site_kinases, site_kinase_groups):
kinase_or_group.is_involved_in.add(site_type)
return site, created
def create_site_objects(
self,
sites: DataFrame,
columns=['refseq', 'position', 'residue', 'mod_type', 'pub_med_ids', 'kinases']
) -> List[Site]:
if sites.empty:
return []
sites = sites[columns]
site_objects = []
add_site = self.add_site
print('Creating database objects:')
with db.session.no_autoflush:
for site_data in tqdm(sites.itertuples(index=False), total=len(sites)):
# split into parts and reset known sites?
site, new = add_site(*site_data)
if new:
site_objects.append(site)
return site_objects
@staticmethod
def repr_site(site):
return f'{site.position}{site.residue}'
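# Hedged subclassing sketch (names are illustrative, not an importer shipped in
# this package): a concrete importer only needs the two abstract properties and
# load_sites(), e.g.
#     class MySiteImporter(SiteImporter):
#         source_name = 'MySource'
#         site_types = {'phosphorylation'}
#         def load_sites(self, path):
#             sites = read_sites(path)              # hypothetical reader -> DataFrame
#             mapped = self.map_sites_to_isoforms(sites)
#             return self.create_site_objects(mapped)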
| lgpl-2.1 |
nickmarton/kaggle | Facial Keypoints Detection/parse_predictions.py | 1 | 2275 | """Parse raw predictions into format acceptable by kaggle."""
from __future__ import division, print_function
import logging
import numpy as np
import pandas as pd
Y_MAX = 95.8089831215
Y_MIN = 3.82624305628
def set_verbosity(verbose_level=3):
"""Set the level of verbosity of the Preprocessing."""
if not type(verbose_level) == int:
raise TypeError("verbose_level must be an int")
if verbose_level < 0 or verbose_level > 4:
raise ValueError("verbose_level must be between 0 and 4")
verbosity = [logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG]
logging.basicConfig(
format='%(asctime)s:\t %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=verbosity[verbose_level])
def load_csv_files(predictions_file="raw_predictions.csv",
lookup_file="IdLookupTable.csv"):
"""Load raw predictions and lookup table csv files."""
# Get raw predictions and split predictions from ImageId's
raw_predictions = pd.read_csv(predictions_file)
image_ids = raw_predictions["ImageId"]
raw_predictions = raw_predictions.ix[:, 1:]
# Unscale predictions and add ImageId column back in
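    # The targets are assumed to have been scaled to [-1, 1] during training via
    # y_scaled = 2 * (y - Y_MIN) / (Y_MAX - Y_MIN) - 1; the line below is its inverse.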
unscaled_predictions = (
(raw_predictions + 1) * (Y_MAX - Y_MIN) / 2) + Y_MIN
unscaled_predictions["ImageId"] = image_ids
output = []
# Get lookup table
lookup_table = pd.read_csv(lookup_file)
for index, row in lookup_table.iterrows():
row_id, image_id, label, loc = np.array(row)
# Get predicted location corresponding to RowId
out_loc = unscaled_predictions[
unscaled_predictions["ImageId"] == image_id][label]
output.append([row_id, np.array(out_loc)[0]])
# Log some notion of how long we have left
if index % 1000 == 0:
logging.info(
"{:.2f} percent done".format(index / len(lookup_table) * 100))
out_df = pd.DataFrame(output, columns=["RowId", "Location"])
return out_df
def main():
"""."""
set_verbosity(3)
out_df = load_csv_files("raw_predictions_750.csv")
out_df.to_csv("Predictions_750.csv", index=False)
if __name__ == "__main__":
main()
| mit |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/pylab_examples/barchart_demo2.py | 6 | 4284 | """
Thanks Josh Hemann for the example
This examples comes from an application in which grade school gym
teachers wanted to be able to show parents how their child did across
a handful of fitness tests, and importantly, relative to how other
children did. To extract the plotting code for demo purposes, we'll
just make up some data for little Johnny Doe...
"""
import numpy as np
import matplotlib.pyplot as plt
import pylab
from matplotlib.ticker import MaxNLocator
student = 'Johnny Doe'
grade = 2
gender = 'boy'
cohortSize = 62 # The number of other 2nd grade boys
numTests = 5
testNames = ['Pacer Test', 'Flexed Arm\n Hang', 'Mile Run', 'Agility',
'Push Ups']
testMeta = ['laps', 'sec', 'min:sec', 'sec', '']
scores = ['7', '48', '12:52', '17', '14']
rankings = np.round(np.random.uniform(0, 1, numTests)*100, 0)
fig, ax1 = plt.subplots(figsize=(9, 7))
plt.subplots_adjust(left=0.115, right=0.88)
fig.canvas.set_window_title('Eldorado K-8 Fitness Chart')
pos = np.arange(numTests)+0.5 # Center bars on the Y-axis ticks
rects = ax1.barh(pos, rankings, align='center', height=0.5, color='m')
ax1.axis([0, 100, 0, 5])
pylab.yticks(pos, testNames)
ax1.set_title('Johnny Doe')
plt.text(50, -0.5, 'Cohort Size: ' + str(cohortSize),
horizontalalignment='center', size='small')
# Set the right-hand Y-axis ticks and labels and set X-axis tick marks at the
# deciles
ax2 = ax1.twinx()
ax2.plot([100, 100], [0, 5], 'white', alpha=0.1)
ax2.xaxis.set_major_locator(MaxNLocator(11))
xticks = pylab.setp(ax2, xticklabels=['0', '10', '20', '30', '40', '50', '60',
'70', '80', '90', '100'])
ax2.xaxis.grid(True, linestyle='--', which='major', color='grey',
alpha=0.25)
#Plot a solid vertical gridline to highlight the median position
plt.plot([50, 50], [0, 5], 'grey', alpha=0.25)
# Build up the score labels for the right Y-axis by first appending a carriage
# return to each string and then tacking on the appropriate meta information
# (i.e., 'laps' vs 'seconds'). We want the labels centered on the ticks, so if
# there is no meta info (like for pushups) then don't add the carriage return to
# the string
def withnew(i, scr):
if testMeta[i] != '':
return '%s\n' % scr
else:
return scr
scoreLabels = [withnew(i, scr) for i, scr in enumerate(scores)]
scoreLabels = [i+j for i, j in zip(scoreLabels, testMeta)]
# set the tick locations
ax2.set_yticks(pos)
# set the tick labels
ax2.set_yticklabels(scoreLabels)
# make sure that the limits are set equally on both yaxis so the ticks line up
ax2.set_ylim(ax1.get_ylim())
ax2.set_ylabel('Test Scores')
#Make list of numerical suffixes corresponding to position in a list
# 0 1 2 3 4 5 6 7 8 9
suffixes = ['th', 'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th']
ax2.set_xlabel('Percentile Ranking Across ' + str(grade) + suffixes[grade]
+ ' Grade ' + gender.title() + 's')
# Lastly, write in the ranking inside each bar to aid in interpretation
for rect in rects:
# Rectangle widths are already integer-valued but are floating
# type, so it helps to remove the trailing decimal point and 0 by
# converting width to int type
width = int(rect.get_width())
# Figure out what the last digit (width modulo 10) so we can add
# the appropriate numerical suffix (e.g., 1st, 2nd, 3rd, etc)
lastDigit = width % 10
# Note that 11, 12, and 13 are special cases
if (width == 11) or (width == 12) or (width == 13):
suffix = 'th'
else:
suffix = suffixes[lastDigit]
rankStr = str(width) + suffix
if (width < 5): # The bars aren't wide enough to print the ranking inside
xloc = width + 1 # Shift the text to the right side of the right edge
clr = 'black' # Black against white background
align = 'left'
else:
xloc = 0.98*width # Shift the text to the left side of the right edge
clr = 'white' # White on magenta
align = 'right'
# Center the text vertically in the bar
yloc = rect.get_y()+rect.get_height()/2.0
ax1.text(xloc, yloc, rankStr, horizontalalignment=align,
verticalalignment='center', color=clr, weight='bold')
plt.show()
| mit |
biocore/American-Gut | tests/test_diversity_analysis.py | 5 | 38742 | #!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import skbio
from os import rmdir
from os.path import realpath, dirname, join as pjoin, exists
from pandas import Series, DataFrame, Index
from pandas.util.testing import assert_index_equal, assert_frame_equal
from americangut.diversity_analysis import (pad_index,
check_dir,
post_hoc_pandas,
multiple_correct_post_hoc,
get_distance_vectors,
segment_colormap,
_get_bar_height,
_get_p_value,
_correct_p_value,
split_taxa,
get_ratio_heatmap)
__author__ = "Justine Debelius"
__copyright__ = "Copyright 2014"
__credits__ = ["Justine Debelius"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Justine Debelius"
__email__ = "[email protected]"
# Determines the location fo the reference files
TEST_DIR = dirname(realpath(__file__))
class DiversityAnalysisTest(TestCase):
def setUp(self):
# Sets up lists for the data frame
self.ids = ['000001181.5654', '000001096.8485', '000001348.2238',
'000001239.2471', '000001925.5603', '000001098.6354',
'000001577.8059', '000001778.097' , '000001969.1967',
'000001423.7093', '000001180.1049', '000001212.5887',
'000001984.9281', '000001025.9349', '000001464.5884',
'000001800.6787', '000001629.5398', '000001473.443',
'000001351.1149', '000001223.1658', '000001884.0338',
'000001431.6762', '000001436.0807', '000001726.2609',
'000001717.784' , '000001290.9612', '000001806.4843',
'000001490.0658', '000001719.4572', '000001244.6229',
'000001092.3014', '000001315.8661', '000001525.8659',
'000001864.7889', '000001816.9' , '000001916.7858',
'000001261.3164', '000001593.2364', '000001817.3052',
'000001879.8596', '000001509.217' , '000001658.4638',
'000001741.9117', '000001940.457' , '000001620.315' ,
'000001706.6473', '000001287.1914', '000001370.8878',
'000001943.0664', '000001187.2735', '000001065.4497',
'000001598.6903', '000001254.2929', '000001526.143' ,
'000001980.8969', '000001147.6823', '000001745.3174',
'000001978.6417', '000001547.4582', '000001649.7564',
'000001752.3511', '000001231.5535', '000001875.7213',
'000001247.5567', '000001412.7777', '000001364.1045',
'000001124.3191', '000001654.0339', '000001795.4842',
'000001069.8469', '000001149.2945', '000001858.8903',
'000001667.8228', '000001648.5881', '000001775.0501',
'000001023.1689', '000001001.0859', '000001129.0853',
'000001992.9674', '000001174.3727', '000001126.3446',
'000001553.099' , '000001700.7898', '000001345.5369',
'000001821.4033', '000001921.0702', '000001368.0382',
'000001589.0756', '000001428.6135', '000001417.7107',
'000001050.2949', '000001549.0374', '000001169.7575',
'000001827.0751', '000001974.5358', '000001081.3137',
'000001452.7866', '000001194.8171', '000001781.3765',
'000001676.7693', '000001536.9816', '000001123.9341',
'000001950.0472', '000001386.1622', '000001789.8068',
'000001434.209', '000001156.782' , '000001630.8111',
'000001930.9789', '000001136.2997', '000001901.1578',
'000001358.6365', '000001834.4873', '000001175.739' ,
'000001565.3199', '000001532.5022', '000001844.4434',
'000001374.6652', '000001066.9395', '000001277.3526',
'000001765.7054', '000001195.7903', '000001403.1857',
'000001267.8034', '000001463.8063', '000001567.256' ,
'000001986.3291', '000001912.5336', '000001179.8083',
'000001539.4475', '000001702.7498', '000001362.2036',
'000001605.3957', '000001966.5905', '000001690.2717',
'000001796.78' , '000001965.9646', '000001570.6394',
'000001344.0749', '000001505.094' , '000001500.3763',
'000001887.334' , '000001896.9071', '000001061.5473',
'000001210.8434', '000001762.6421', '000001389.9375',
'000001747.7094', '000001275.7608', '000001100.6327',
'000001832.2851', '000001627.4754', '000001811.8183',
'000001202.8991', '000001163.3137', '000001196.7148',
'000001318.8771', '000001155.3022', '000001724.2977',
'000001737.328' , '000001289.1381', '000001480.495',
'000001797.7651', '000001117.9836', '000001108.0792',
'000001060.2191', '000001379.0706', '000001513.9224',
'000001731.9258', '000001563.7487', '000001988.1656',
'000001594.7285', '000001909.1042', '000001920.0818',
'000001999.9644', '000001133.9942', '000001608.1459',
'000001784.159' , '000001543.759' , '000001669.3403',
'000001545.3456', '000001177.5607', '000001387.8614',
'000001086.4642', '000001514.2136', '000001329.4163',
'000001757.7272', '000001574.9939', '000001750.1329',
'000001682.8423', '000001331.238' , '000001330.6685',
'000001556.6615', '000001575.4633', '000001754.591' ,
'000001456.5672', '000001707.2857', '000001164.864' ,
'000001466.7766', '000001383.5692', '000001911.8425',
'000001880.6072', '000001278.4999', '000001671.8068',
'000001301.3063', '000001071.2867', '000001192.7655',
'000001954.0541', '000001041.0466', '000001862.7417',
'000001587.4996', '000001242.6044', '000001040.399' ,
'000001744.3975', '000001189.5132', '000001885.0033',
'000001193.7964', '000001204.533' , '000001279.8583',
'000001488.2298', '000001971.1838', '000001492.0943',
'000001722.285' , '000001947.5481', '000001054.343' ,
'000001227.5756', '000001603.0731', '000001948.0095',
'000001393.6518', '000001661.6287', '000001829.9104',
'000001342.3216', '000001341.7147', '000001994.1765',
'000001400.0325', '000001324.5159', '000001355.789' ,
'000001538.6368', '000001121.0767', '000001377.1835',
'000001831.3158', '000001968.0205', '000001003.7916',
'000001502.0367', '000001729.5203', '000001284.1266',
'000001252.1786', '000001533.2349', '000001198.741' ,
'000001483.1918', '000001528.3996', '000001304.2649',
'000001281.7718', '000001441.8902', '000001203.4813',
'000001657.915' , '000001668.1396', '000001560.6021',
'000001213.1081', '000001894.5208', '000001791.9156',
'000001371.9864', '000001631.1904', '000001635.3301',
'000001541.2899', '000001748.311' , '000001326.0745',
'000001736.2491', '000001028.1898', '000001997.5772',
'000001764.9201', '000001664.4968', '000001031.0638',
'000001457.8448', '000001335.8157', '000001053.361' ,
'000001372.2917', '000001847.3652', '000001746.7838',
'000001173.0655', '000001653.9771', '000001104.8455',
'000001642.548' , '000001866.4881', '000001381.5643',
'000001673.6333', '000001839.2794', '000001855.195' ,
'000001698.1673', '000001813.0695', '000001153.6346',
'000001354.0321', '000001035.5915', '000001469.6652',
'000001422.9333', '000001148.4367', '000001551.0986',
'000001047.9434', '000001160.0422', '000001621.3736']
self.raw_ids = ['1181.5654', '1096.8485', '1348.2238', '1239.2471',
'1925.5603', '1098.6354', '1577.8059', '1778.097',
'1969.1967', '1423.7093', '1180.1049', '1212.5887',
'1984.9281', '1025.9349', '1464.5884', '1800.6787',
'1629.5398', '1473.443', '1351.1149', '1223.1658',
'1884.0338', '1431.6762', '1436.0807', '1726.2609',
'1717.784', '1290.9612', '1806.4843', '1490.0658',
'1719.4572', '1244.6229', '1092.3014', '1315.8661',
'1525.8659', '1864.7889', '1816.9', '1916.7858',
'1261.3164', '1593.2364', '1817.3052', '1879.8596',
'1509.217', '1658.4638', '1741.9117', '1940.457',
'1620.315', '1706.6473', '1287.1914', '1370.8878',
'1943.0664', '1187.2735', '1065.4497', '1598.6903',
'1254.2929', '1526.143', '1980.8969', '1147.6823',
'1745.3174', '1978.6417', '1547.4582', '1649.7564',
'1752.3511', '1231.5535', '1875.7213', '1247.5567',
'1412.7777', '1364.1045', '1124.3191', '1654.0339',
'1795.4842', '1069.8469', '1149.2945', '1858.8903',
'1667.8228', '1648.5881', '1775.0501', '1023.1689',
'1001.0859', '1129.0853', '1992.9674', '1174.3727',
'1126.3446', '1553.099', '1700.7898', '1345.5369',
'1821.4033', '1921.0702', '1368.0382', '1589.0756',
'1428.6135', '1417.7107', '1050.2949', '1549.0374',
'1169.7575', '1827.0751', '1974.5358', '1081.3137',
'1452.7866', '1194.8171', '1781.3765', '1676.7693',
'1536.9816', '1123.9341', '1950.0472', '1386.1622',
'1789.8068', '1434.209', '1156.782', '1630.8111',
'1930.9789', '1136.2997', '1901.1578', '1358.6365',
'1834.4873', '1175.739', '1565.3199', '1532.5022',
'1844.4434', '1374.6652', '1066.9395', '1277.3526',
'1765.7054', '1195.7903', '1403.1857', '1267.8034',
'1463.8063', '1567.256', '1986.3291', '1912.5336',
'1179.8083', '1539.4475', '1702.7498', '1362.2036',
'1605.3957', '1966.5905', '1690.2717', '1796.78',
'1965.9646', '1570.6394', '1344.0749', '1505.094',
'1500.3763', '1887.334', '1896.9071', '1061.5473',
'1210.8434', '1762.6421', '1389.9375', '1747.7094',
'1275.7608', '1100.6327', '1832.2851', '1627.4754',
'1811.8183', '1202.8991', '1163.3137', '1196.7148',
'1318.8771', '1155.3022', '1724.2977', '1737.328',
'1289.1381', '1480.495', '1797.7651', '1117.9836',
'1108.0792', '1060.2191', '1379.0706', '1513.9224',
'1731.9258', '1563.7487', '1988.1656', '1594.7285',
'1909.1042', '1920.0818', '1999.9644', '1133.9942',
'1608.1459', '1784.159', '1543.759', '1669.3403',
'1545.3456', '1177.5607', '1387.8614', '1086.4642',
'1514.2136', '1329.4163', '1757.7272', '1574.9939',
'1750.1329', '1682.8423', '1331.238', '1330.6685',
'1556.6615', '1575.4633', '1754.591', '1456.5672',
'1707.2857', '1164.864', '1466.7766', '1383.5692',
'1911.8425', '1880.6072', '1278.4999', '1671.8068',
'1301.3063', '1071.2867', '1192.7655', '1954.0541',
'1041.0466', '1862.7417', '1587.4996', '1242.6044',
'1040.399', '1744.3975', '1189.5132', '1885.0033',
'1193.7964', '1204.533', '1279.8583', '1488.2298',
'1971.1838', '1492.0943', '1722.285', '1947.5481',
'1054.343', '1227.5756', '1603.0731', '1948.0095',
'1393.6518', '1661.6287', '1829.9104', '1342.3216',
'1341.7147', '1994.1765', '1400.0325', '1324.5159',
'1355.789', '1538.6368', '1121.0767', '1377.1835',
'1831.3158', '1968.0205', '1003.7916', '1502.0367',
'1729.5203', '1284.1266', '1252.1786', '1533.2349',
'1198.741', '1483.1918', '1528.3996', '1304.2649',
'1281.7718', '1441.8902', '1203.4813', '1657.915',
'1668.1396', '1560.6021', '1213.1081', '1894.5208',
'1791.9156', '1371.9864', '1631.1904', '1635.3301',
'1541.2899', '1748.311', '1326.0745', '1736.2491',
'1028.1898', '1997.5772', '1764.9201', '1664.4968',
'1031.0638', '1457.8448', '1335.8157', '1053.361',
'1372.2917', '1847.3652', '1746.7838', '1173.0655',
'1653.9771', '1104.8455', '1642.548', '1866.4881',
'1381.5643', '1673.6333', '1839.2794', '1855.195',
'1698.1673', '1813.0695', '1153.6346', '1354.0321',
'1035.5915', '1469.6652', '1422.9333', '1148.4367',
'1551.0986', '1047.9434', '1160.0422', '1621.3736']
self.website = ['twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'twitter', 'twitter', 'twitter', 'twitter', 'twitter',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'facebook', 'facebook', 'facebook', 'facebook',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit',
'reddit', 'reddit', 'reddit', 'reddit', 'reddit']
self.time = np.array([43.75502506, 32.09982846, 66.44821015,
54.67751100, 74.43663107, 64.91509381,
101.03624273, 42.50120543, 35.92898678,
50.84800153, 46.32394154, 55.82813196,
63.90361272, 77.13825762, 78.76436441,
53.64704526, 64.75223193, 58.39207272,
52.44353642, 60.38707826, 56.51714085,
55.72374379, 59.52585080, 62.99625025,
40.04902494, 89.02585909, 63.23240605,
47.06553888, 73.00190315, 83.80903794,
43.41851989, 25.83410322, 68.21623464,
50.43442676, 49.98389215, 40.24409163,
73.12600309, 59.26529974, 61.66301113,
82.24776146, 69.88472085, 55.33333433,
40.29625976, 68.09510810, 66.85545440,
66.44002527, 72.37790419, 72.81679314,
55.09080142, 48.37538346, 47.60326036,
51.52223083, 56.51417473, 83.04863572,
52.14761947, 81.71073287, 40.88456188,
61.76308339, 75.31540245, 64.41482716,
52.36763551, 64.48863043, 42.46265519,
76.41626766, 73.35103300, 60.13966132,
55.09395578, 72.26945197, 64.14173225,
59.39558958, 54.92166432, 56.15937888,
35.82839971, 80.22338349, 52.03277136,
30.46794613, 58.48158453, 51.08064303,
67.56882508, 64.67001088, 70.31701029,
69.69418892, 45.40860831, 68.72559847,
57.18659048, 79.66512776, 54.12521925,
81.23543425, 79.58214820, 34.09101162,
34.07926981, 53.68661297, 84.73351889,
76.98667389, 83.91038109, 66.35125602,
43.38243470, 60.07458569, 64.01561208,
70.66573983, 193.40761370, 149.46771172,
178.54940784, 146.81737462, 112.67080963,
105.79566831, 169.60015351, 18.16782312,
32.33793705, 161.72043630, 136.65935083,
23.99200240, 124.30215961, 82.66230873,
181.53122374, 96.73843934, 149.75297762,
119.92104479, 29.30535556, 88.98066487,
82.18281694, 99.76251178, 120.62310261,
136.15837651, 140.85019656, 117.06990731,
163.65366512, 214.50717765, 79.72206954,
138.03112015, 144.45114437, 16.41512219,
72.08551518, 85.46372630, 149.13372767,
76.92212059, 109.55645713, 141.65595764,
119.18734692, 51.20662038, 183.75411201,
132.56555213, 101.55378472, 177.69500317,
130.27160521, 143.13166882, 107.23696643,
212.72330518, 130.66925461, 210.11532010,
118.65653641, 77.25638890, 153.29389237,
146.97514023, 0, 105.83704268,
200.05768527, 166.46158871, 135.60586892,
111.06739555, 71.50642636, 21.58216051,
183.15691697, 38.58822892, 38.84706613,
119.36492288, 108.77038019, 88.70541115,
12.61048676, 0, 157.77516036,
43.70631550, 193.87291179, 203.26411137,
179.20054809, 148.37792309, 170.38620220,
102.23651707, 63.46142967, 82.33043919,
258.68968847, 223.94730803, 281.46276889,
350.40078080, 281.53639290, 305.90987647,
286.22932832, 356.53308940, 276.81798226,
305.04298118, 299.13866751, 310.41638501,
347.77589112, 278.37912458, 295.00398672,
292.23076451, 348.14209652, 289.14551826,
288.86118512, 299.21300848, 264.29449774,
353.26294987, 275.68453639, 279.45885854,
287.79470948, 303.34990705, 324.73398364,
337.50702196, 326.59649321, 307.14724645,
300.13203731, 335.28447725, 273.59560986,
315.71949943, 268.86100671, 309.44822617,
357.67123883, 313.70684577, 311.99209985,
277.87145259, 316.89239037, 254.39694340,
300.02140552, 237.21539997, 329.92714491,
318.32432005, 326.65600788, 305.40145477,
326.78894825, 318.92641904, 320.59443395,
308.26919092, 300.00328438, 294.61849344,
284.55947774, 277.63798594, 359.44015820,
292.55982554, 322.71946292, 318.60262991,
307.93128984, 282.51266904, 304.74114309,
285.30356994, 240.53264849, 252.69086070,
289.49431273, 284.68590654, 317.95577632,
288.39433522, 303.55186227, 286.21794163,
281.11550530, 297.15770465, 307.37441274,
290.21885096, 297.39693356, 325.12591032,
340.14615302, 314.10755364, 321.41818630,
302.46825284, 272.60859596, 285.02155314,
260.57728373, 301.01186081, 314.01532677,
301.39435122, 301.53108663, 290.81233377,
331.20632569, 329.26192444, 252.12513671,
294.17604509, 314.25160994, 260.22225619,
296.06068483, 328.70473699, 293.72532762,
323.92449714, 279.36077985, 327.10547840,
332.33552711, 244.70073987, 368.94370441,
288.52914183, 270.96734651, 321.09234466,
395.74872017, 311.64415600, 314.81990465,
319.70690366, 313.96061624, 275.38526052,
338.02460670, 286.98781666, 353.55909038,
306.62353307, 306.92733543, 273.74222557])
# Creates a data frame object
self.df = DataFrame({'WEBSITE': Series(self.website, index=self.ids),
'DWELL_TIME': Series(self.time, index=self.ids)})
# Creates the distance matrix object
self.ids2 = np.array(['000001181.5654', '000001096.8485',
'000001348.2238', '000001239.2471',
'000001925.5603', '000001148.4367',
'000001551.0986', '000001047.9434',
'000001160.0422', '000001621.3736'])
self.map = self.df.loc[self.ids2]
dist = np.array([[0.000, 0.297, 0.257, 0.405, 0.131, 0.624, 0.934,
0.893, 0.519, 0.904],
[0.297, 0.000, 0.139, 0.130, 0.348, 1.000, 0.796,
1.000, 0.647, 0.756],
[0.257, 0.139, 0.000, 0.384, 0.057, 0.748, 0.599,
0.710, 0.528, 1.000],
[0.405, 0.130, 0.384, 0.000, 0.303, 0.851, 0.570,
0.698, 1.000, 0.638],
[0.131, 0.348, 0.057, 0.303, 0.000, 0.908, 1.000,
0.626, 0.891, 1.000],
[0.624, 1.000, 0.748, 0.851, 0.908, 0.000, 0.264,
0.379, 0.247, 0.385],
[0.934, 0.796, 0.599, 0.570, 1.000, 0.264, 0.000,
0.336, 0.326, 0.530],
[0.893, 1.000, 0.710, 0.698, 0.626, 0.379, 0.336,
0.000, 0.257, 0.450],
[0.519, 0.647, 0.528, 1.000, 0.891, 0.247, 0.326,
0.257, 0.000, 0.492],
[0.904, 0.756, 1.000, 0.638, 1.000, 0.385, 0.530,
0.450, 0.492, 0.000]])
self.dm = skbio.DistanceMatrix(dist, self.ids2)
self.taxa = ['k__Bacteria; p__[Proteobacteria]; '
'c__Gammaproteobacteria; o__; f__; g__; s__',
'k__Bacteria; p__Proteobacteria; '
'c__Gammaproteobacteria; o__Enterobacteriales; '
'f__Enterbacteriaceae; g__Escherichia; s__coli']
self.sub_p = DataFrame(np.array([['ref_group1 vs. ref_group1',
'ref_group1 vs. group1', 0.01],
['ref_group2 vs. group2',
'ref_group2 vs. ref_group2', 0.02],
['group3 vs. ref_group3',
'ref_group3 vs. ref_group3', 0.03],
['ref_group4 vs. ref_group4',
'group4 vs. ref_group4', 0.04]]),
columns=['Group 1', 'Group 2', 'p_value'])
self.sub_p.p_value = self.sub_p.p_value.astype(float)
self.sub_p_lookup = {k: set(self.sub_p[k].values) for k in
('Group 1', 'Group 2')}
def test_pad_index_default(self):
# Creates a data frame with raw ids and no sample column
df = DataFrame({'#SampleID': self.raw_ids,
'WEBSITE': Series(self.website),
'DWELL_TIME': Series(self.time)})
# Pads the raw text
df = pad_index(df)
assert_index_equal(self.df.index, df.index)
def test_pad_index_custom_index(self):
# Creates a data frame with raw ids and no sample column
df = DataFrame({'RawID': self.raw_ids,
'WEBSITE': Series(self.website),
'DWELL_TIME': Series(self.time)})
# Pads the raw text
df = pad_index(df, index_col='RawID')
assert_index_equal(self.df.index, df.index)
def test_pad_index_number(self):
# Creates a data frame with raw ids and no sample column
df = DataFrame({'#SampleID': self.raw_ids,
'WEBSITE': Series(self.website),
'DWELL_TIME': Series(self.time)})
# Pads the raw text
df = pad_index(df, nzeros=4)
assert_index_equal(Index(self.raw_ids), df.index)
def test_check_dir(self):
# Sets up a dummy directory that does not exist
does_not_exist = pjoin(TEST_DIR, 'this_dir_does_not_exist')
# Checks the directory does not currently exist
self.assertFalse(exists(does_not_exist))
# checks the directory
check_dir(does_not_exist)
# Checks the directory exists now
self.assertTrue(exists(does_not_exist))
# Removes the directory
rmdir(does_not_exist)
def test_post_hoc_pandas(self):
known_index = Index(['twitter', 'facebook', 'reddit'],
name='WEBSITE')
known_df = DataFrame(np.array([[100, 60.435757, 60.107124, 14.632637,
np.nan, np.nan],
[80, 116.671135, 119.642984, 54.642403,
7.010498e-14, np.nan],
[120, 302.615690, 301.999670,
28.747101, 2.636073e-37,
5.095701e-33]]),
index=known_index,
columns=['Counts', 'Mean', 'Median', 'Stdv',
'twitter', 'facebook'])
known_df.Counts = known_df.Counts.astype('int64')
test_df = post_hoc_pandas(self.df, 'WEBSITE', 'DWELL_TIME')
assert_frame_equal(known_df, test_df)
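# A hedged reading of the expected frame above: each row is one WEBSITE
# group; Counts/Mean/Median/Stdv summarise DWELL_TIME within that group, and
# the trailing 'twitter'/'facebook' columns hold the post-hoc p-values
# against those earlier groups (NaN on and above the diagonal).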
def test_multiple_correct_post_hoc(self):
known_df = DataFrame(np.array([[np.nan, 4e-2, 1e-3],
[4e-4, np.nan, 1e-6],
[4e-7, 4e-8, np.nan]]),
columns=[0, 1, 2])
raw_ph = DataFrame(np.power(10, -np.array([[np.nan, 2, 3],
[4, np.nan, 6],
[7, 8, np.nan]])),
columns=[0, 1, 2])
order = np.arange(0, 3)
test_df = multiple_correct_post_hoc(raw_ph, order, 'fdr_bh')
assert_frame_equal(known_df, test_df)
def test_segmented_colormap(self):
known_cmap = np.array([[0.88207613, 0.95386390, 0.69785469, 1.],
[0.59215687, 0.84052289, 0.72418302, 1.],
[0.25268744, 0.71144946, 0.76838141, 1.],
[0.12026144, 0.50196080, 0.72156864, 1.],
[0.14136102, 0.25623991, 0.60530568, 1.]])
test_cmap = segment_colormap('YlGnBu', 5)
npt.assert_almost_equal(test_cmap, known_cmap, 5)
def test_get_bar_height(self):
test_lowest, test_fudge = \
_get_bar_height(np.array([0.01, 0.02, 0.3, 0.52]))
npt.assert_almost_equal(test_lowest, 0.55, 3)
self.assertEqual(test_fudge, 10)
def test_get_bar_height_fudge(self):
test_lowest, test_fudge = \
_get_bar_height(np.array([0.01, 0.02, 0.3, 0.52]), factor=3)
self.assertEqual(test_lowest, 0.54)
self.assertEqual(test_fudge, 10)
def test_get_p_value(self):
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group1', 'group1', 'p_value'), 0.01)
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group2', 'group2', 'p_value'), 0.02)
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group3', 'group3', 'p_value'), 0.03)
self.assertEqual(_get_p_value(self.sub_p, self.sub_p_lookup,
'ref_group4', 'group4', 'p_value'), 0.04)
def test_get_p_value_error(self):
with self.assertRaises(ValueError):
_get_p_value(self.sub_p, self.sub_p_lookup, 'ref_group',
'group', 'p_value')
def test_correct_p_value_no_tail(self):
p_value = 0.05
tail = False
self.assertEqual(_correct_p_value(tail, p_value, 1, 1), p_value)
def test_correct_p_value_no_greater_ref(self):
p_value = 0.05
tail = True
self.assertEqual(_correct_p_value(tail, p_value, 2, 1), 1)
def test_correct_p_value_no_less_ref(self):
p_value = 0.05
tail = True
self.assertEqual(_correct_p_value(tail, p_value, 1, 2), p_value)
def test_get_distance_vectors(self):
known_within = {'twitter': np.array([0.297, 0.257, 0.405, 0.131,
0.139, 0.130, 0.348, 0.384,
0.057, 0.303]),
'reddit': np.array([0.264, 0.379, 0.247, 0.385, 0.336,
0.326, 0.530, 0.257, 0.450,
0.492])}
known_between = {('twitter', 'reddit'): np.array([0.624, 0.934, 0.893,
0.519, 0.904, 1.000,
0.796, 1.000, 0.647,
0.756, 0.748, 0.599,
0.710, 0.528, 1.000,
0.851, 0.570, 0.698,
1.000, 0.638, 0.908,
1.000, 0.626, 0.891,
1.000])}
test_within, test_between = \
get_distance_vectors(dm=self.dm,
df=self.map,
group='WEBSITE',
order=['twitter', 'reddit'])
# Tests the results
self.assertEqual(known_within.keys(), test_within.keys())
self.assertEqual(known_between.keys(), test_between.keys())
for k, a in test_within.iteritems():
npt.assert_array_equal(known_within[k], a)
for k, a in test_between.iteritems():
npt.assert_array_equal(known_between[k], a)
def test_split_taxa_error(self):
with self.assertRaises(ValueError):
split_taxa(['k__Bacteria; p__[Proteobacteria]; '
'c__Gammaproteobacteria'], 7)
def test_split_taxa(self):
known_taxa = np.array([['Bacteria', 'cont. Proteobacteria',
'Gammaproteobacteria',
'c. Gammaproteobacteria',
'c. Gammaproteobacteria',
'c. Gammaproteobacteria',
'c. Gammaproteobacteria'],
['Bacteria', 'Proteobacteria',
'Gammaproteobacteria', 'Enterobacteriales',
'Enterbacteriaceae', 'Escherichia', 'coli']],
dtype='|S32')
known_levels = ['kingdom', 'phylum', 'p_class', 'p_order', 'family',
'genus', 'species']
test_taxa, test_levels = split_taxa(self.taxa, 7)
self.assertEqual(known_levels, test_levels)
npt.assert_array_equal(known_taxa, test_taxa)
def test_get_ratio_heatmap(self):
data = np.array([[1, 2, 3, 4],
[2, 4, 6, 8],
[3, 6, 9, 12],
[4, 8, 12, 16]])
known = np.array([[0.4, 0.8, 1.2, 1.6],
[0.4, 0.8, 1.2, 1.6],
[0.4, 0.8, 1.2, 1.6],
[0.4, 0.8, 1.2, 1.6]])
test = get_ratio_heatmap(data)
npt.assert_array_equal(test, known)
def test_get_ratio_heatmap_log(self):
data = np.array([[2, 4, 8, 16],
[1, 4, 16, 256]])
known = np.array([[0, 1, 2, 3],
[0, 2, 4, 8]])
test = get_ratio_heatmap(data, ref_pos=0, log=2)
npt.assert_array_equal(test, known)
if __name__ == '__main__':
main()
| bsd-3-clause |
apache/spark | python/pyspark/pandas/plot/plotly.py | 14 | 7646 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING, Union
import pandas as pd
from pyspark.pandas.plot import (
HistogramPlotBase,
name_like_string,
PandasOnSparkPlotAccessor,
BoxPlotBase,
KdePlotBase,
)
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
def plot_pandas_on_spark(data: Union["ps.DataFrame", "ps.Series"], kind: str, **kwargs):
import plotly
# pandas-on-Spark specific plots
if kind == "pie":
return plot_pie(data, **kwargs)
if kind == "hist":
return plot_histogram(data, **kwargs)
if kind == "box":
return plot_box(data, **kwargs)
if kind == "kde" or kind == "density":
return plot_kde(data, **kwargs)
# Other plots.
return plotly.plot(PandasOnSparkPlotAccessor.pandas_plot_data_map[kind](data), kind, **kwargs)
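# A hedged usage sketch (assumes an active Spark session; plotly is the
# default plotting backend for pandas-on-Spark, so DataFrame.plot calls are
# dispatched through plot_pandas_on_spark above):
#
# import pyspark.pandas as ps
# psdf = ps.DataFrame({"a": [1, 1, 2, 3, 5, 8]})
# fig = psdf.plot.hist(bins=3) # routed to plot_histogram below
# fig.show()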
def plot_pie(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
from plotly import express
data = PandasOnSparkPlotAccessor.pandas_plot_data_map["pie"](data)
if isinstance(data, pd.Series):
pdf = data.to_frame()
return express.pie(pdf, values=pdf.columns[0], names=pdf.index, **kwargs)
elif isinstance(data, pd.DataFrame):
values = kwargs.pop("y", None)
default_names = None
if values is not None:
default_names = data.index
return express.pie(
data,
values=kwargs.pop("values", values),
names=kwargs.pop("names", default_names),
**kwargs,
)
else:
raise RuntimeError("Unexpected type: [%s]" % type(data))
def plot_histogram(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
import plotly.graph_objs as go
import pyspark.pandas as ps
bins = kwargs.get("bins", 10)
y = kwargs.get("y")
if y and isinstance(data, ps.DataFrame):
# Note that the results here match matplotlib's histogram output; the
# x and y handling differs from pandas' own plotly output.
data = data[y]
psdf, bins = HistogramPlotBase.prepare_hist_data(data, bins)
assert len(bins) > 2, "the number of buckets must be higher than 2."
output_series = HistogramPlotBase.compute_hist(psdf, bins)
prev = float("%.9f" % bins[0]) # to make it prettier, truncate.
text_bins = []
for b in bins[1:]:
norm_b = float("%.9f" % b)
text_bins.append("[%s, %s)" % (prev, norm_b))
prev = norm_b
text_bins[-1] = text_bins[-1][:-1] + "]" # replace ) to ] for the last bucket.
bins = 0.5 * (bins[:-1] + bins[1:])
output_series = list(output_series)
bars = []
for series in output_series:
bars.append(
go.Bar(
x=bins,
y=series,
name=name_like_string(series.name),
text=text_bins,
hovertemplate=(
"variable=" + name_like_string(series.name) + "<br>value=%{text}<br>count=%{y}"
),
)
)
fig = go.Figure(data=bars, layout=go.Layout(barmode="stack"))
fig["layout"]["xaxis"]["title"] = "value"
fig["layout"]["yaxis"]["title"] = "count"
return fig
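# A hedged illustration of the labelling above: with bin edges [0, 1, 2] the
# hover text becomes ["[0.0, 1.0)", "[1.0, 2.0]"] and the bars are placed at
# the bin centers [0.5, 1.5].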
def plot_box(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
import plotly.graph_objs as go
import pyspark.pandas as ps
if isinstance(data, ps.DataFrame):
raise RuntimeError(
"plotly does not support a box plot with pandas-on-Spark DataFrame. Use Series instead."
)
# 'whis' isn't actually an argument in plotly (it is in matplotlib), and
# plotly does not seem to expose how far the whiskers reach beyond the
# first and third quartiles; it appears to use the default of 1.5.
whis = kwargs.pop("whis", 1.5)
# 'precision' is pandas-on-Spark specific to control precision for approx_percentile
precision = kwargs.pop("precision", 0.01)
# Plotly options
boxpoints = kwargs.pop("boxpoints", "suspectedoutliers")
notched = kwargs.pop("notched", False)
if boxpoints not in ["suspectedoutliers", False]:
raise ValueError(
"plotly plotting backend does not support 'boxpoints' set to '%s'. "
"Set to 'suspectedoutliers' or False." % boxpoints
)
if notched:
raise ValueError(
"plotly plotting backend does not support 'notched' set to '%s'. "
"Set to False." % notched
)
colname = name_like_string(data.name)
spark_column_name = data._internal.spark_column_name_for(data._column_label)
# Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
# Creates a column to flag rows as outliers or not
outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
# Computes min and max values of non-outliers - the whiskers
whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
fliers = None
if boxpoints:
fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
fliers = [fliers] if len(fliers) > 0 else None
fig = go.Figure()
fig.add_trace(
go.Box(
name=colname,
q1=[col_stats["q1"]],
median=[col_stats["med"]],
q3=[col_stats["q3"]],
mean=[col_stats["mean"]],
lowerfence=[whiskers[0]],
upperfence=[whiskers[1]],
y=fliers,
boxpoints=boxpoints,
notched=notched,
**kwargs, # this is for workarounds. Box takes different options from express.box.
)
)
fig["layout"]["xaxis"]["title"] = colname
fig["layout"]["yaxis"]["title"] = "value"
return fig
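# A hedged usage sketch for the box plot (Series-only, per the check above);
# 'precision' tunes approx_percentile and 'whis' sets the outlier fences:
#
# import pyspark.pandas as ps
# psser = ps.Series([1.0, 2.0, 2.5, 3.0, 50.0])
# fig = psser.plot.box(precision=0.01, whis=1.5)
# fig.show()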
def plot_kde(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
from plotly import express
import pyspark.pandas as ps
if isinstance(data, ps.DataFrame) and "color" not in kwargs:
kwargs["color"] = "names"
psdf = KdePlotBase.prepare_kde_data(data)
sdf = psdf._internal.spark_frame
data_columns = psdf._internal.data_spark_columns
ind = KdePlotBase.get_ind(sdf.select(*data_columns), kwargs.pop("ind", None))
bw_method = kwargs.pop("bw_method", None)
pdfs = []
for label in psdf._internal.column_labels:
pdfs.append(
pd.DataFrame(
{
"Density": KdePlotBase.compute_kde(
sdf.select(psdf._internal.spark_column_for(label)),
ind=ind,
bw_method=bw_method,
),
"names": name_like_string(label),
"index": ind,
}
)
)
pdf = pd.concat(pdfs)
fig = express.line(pdf, x="index", y="Density", **kwargs)
fig["layout"]["xaxis"]["title"] = None
return fig
| apache-2.0 |
anddam/trading-with-python | nautilus/nautilus.py | 77 | 5403 | '''
Created on 26 dec. 2011
Copyright: Jev Kuznetsov
License: BSD
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ib.ext.Contract import Contract
from ib.opt import ibConnection
from ib.ext.Order import Order
import tradingWithPython.lib.logger as logger
from tradingWithPython.lib.eventSystem import Sender, ExampleListener
import tradingWithPython.lib.qtpandas as qtpandas
import numpy as np
import pandas
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
class PriceListener(qtpandas.DataFrameModel):
def __init__(self):
super(PriceListener,self).__init__()
self._header = ['position','bid','ask','last']
def addSymbol(self,symbol):
data = dict(zip(self._header,[0,np.nan,np.nan,np.nan]))
row = pandas.DataFrame(data, index = pandas.Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
def priceHandler(self,sender,event,msg=None):
if msg['symbol'] not in self.df.index:
self.addSymbol(msg['symbol'])
if msg['type'] in self._header:
self.df.ix[msg['symbol'],msg['type']] = msg['price']
self.signalUpdate()
#print self.df
class Broker(Sender):
def __init__(self, name = "broker"):
super(Broker,self).__init__()
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self._id2symbol = {} # id-> symbol dict
self.tws = None
self._nextId = 1 # tws subscription id
self.nextValidOrderId = None
def connect(self):
""" connect to tws """
self.tws = ibConnection() # tws interface
self.tws.registerAll(self._defaultHandler)
self.tws.register(self._nextValidIdHandler,'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True,'')
self.tws.register(self._priceHandler,'TickPrice')
def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'):
''' subscribe to stock data '''
self.log.debug('Subscribing to '+symbol)
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self._nextId
self._nextId += 1
self.tws.reqMktData(subId,c,'',False)
self._id2symbol[subId] = c.m_symbol
self.contracts[symbol]=c
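# A hedged usage sketch (assumes a running TWS / IB Gateway session that
# accepts API connections):
# broker = Broker()
# broker.connect()
# broker.subscribeStk('SPY') # bid/ask/last ticks arrive as 'price' events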
def disconnect(self):
self.tws.disconnect()
#------event handlers--------------------
def _defaultHandler(self,msg):
''' default message handler '''
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def _nextValidIdHandler(self,msg):
self.nextValidOrderId = msg.orderId
self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId))
def _priceHandler(self,msg):
#translate to meaningful messages
message = {'symbol':self._id2symbol[msg.tickerId],
'price':msg.price,
'type':priceTicks[msg.field]}
self.dispatch('price',message)
#-----------------GUI elements-------------------------
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("print selected rows")
Action.triggered.connect(self.printName)
menu.exec_(event.globalPos())
def printName(self):
print "Action triggered from " + self.name
print 'Selected :'
for idx in self.selectionModel().selectedRows():
print self.model().df.ix[idx.row(),:]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.broker = Broker()
self.price = PriceListener()
self.broker.connect()
symbols = ['SPY','XLE','QQQ','VXX','XIV']
for symbol in symbols:
self.broker.subscribeStk(symbol)
self.broker.register(self.price.priceHandler, 'price')
widget = TableView(parent=self)
widget.setModel(self.price)
widget.horizontalHeader().setResizeMode(QHeaderView.Stretch)
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
def __del__(self):
print 'Disconnecting.'
self.broker.disconnect()
if __name__=="__main__":
print "Running nautilus"
import sys
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
print "All done." | bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
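# A hedged illustration of the helper above: _check_targets([0, 1, 2],
# [0, 2, 1]) returns ('multiclass', array([0, 1, 2]), array([0, 2, 1])),
# while mixing a multilabel indicator matrix with a multiclass vector raises
# a ValueError.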
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
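# A hedged example of the statistic above (not part of the scikit-learn
# docstring): identical annotations give a kappa of 1.0, while complete
# disagreement on a balanced binary task gives -1.0, e.g.
# cohen_kappa_score([0, 1, 1, 0], [0, 1, 1, 0]) == 1.0 and
# cohen_kappa_score([0, 1, 0, 1], [1, 0, 1, 0]) == -1.0.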
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
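# Illustrative note (not part of the library): with precision P, recall R and
# weight beta, the score computed above reduces to
#   F_beta = (1 + beta**2) * P * R / (beta**2 * P + R)
# e.g. P = 2/3, R = 1 and beta = 0.5 give roughly 0.714, matching the first
# entry of the ``average=None`` doctest above.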
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
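# Illustrative note (not part of the library): for the cat/dog/pig doctest
# above, the per-class true positives are (cat=2, dog=0, pig=0) out of
# (3, 2, 1) predictions, so macro precision is (2/3 + 0 + 0) / 3 ~= 0.22,
# while micro precision pools the counts: 2 / 6 ~= 0.33.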
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
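# Illustrative note (not part of the library): the 'avg / total' row printed
# above is the support-weighted average of the per-class precision, recall
# and F1 values, as computed by the np.average(..., weights=s) calls.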
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss correspond to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
The Hamming loss is upperbounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
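# Illustrative note (not part of the library): in the spam/ham doctest above,
# the per-sample losses are -log(0.9), -log(0.9), -log(0.8) and -log(0.65);
# their mean is approximately 0.216, matching the reported value.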
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
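# Illustrative note (not part of the library): for the binary doctest above,
# the margins y_true * pred_decision are roughly (2.18, 2.36, 0.09), so the
# per-sample losses max(0, 1 - margin) are (0, 0, 0.91) and their mean is
# about 0.30.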
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
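# Illustrative note (not part of the library): for the doctest above,
# ((0 - 0.1)**2 + (1 - 0.9)**2 + (1 - 0.8)**2 + (0 - 0.3)**2) / 4 = 0.0375,
# matching the reported 0.037... value.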
| bsd-3-clause |
dsullivan7/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset into training and test sets:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
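# Illustrative note (assumption, not part of the original exercise): with
# analyzer='char' and ngram_range=(1, 3), a string like "cat" is mapped to the
# character n-grams 'c', 'a', 't', 'ca', 'at' and 'cat', whose term
# frequencies act as the language 'fingerprint' described above.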
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set and store it in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
belteshassar/cartopy | lib/cartopy/tests/mpl/test_gridliner.py | 2 | 6239 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import matplotlib as mpl
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
try:
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_raises
import numpy as np
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER
from cartopy.tests import _proj4_version
from cartopy.tests.mpl import ImageTesting
@ImageTesting(['gridliner1'])
def test_gridliner():
ny, nx = 2, 4
plt.figure(figsize=(10, 10))
ax = plt.subplot(nx, ny, 1, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 2, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines()
ax = plt.subplot(nx, ny, 3, projection=ccrs.OSGB())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), color='blue', linestyle='-')
ax.gridlines(ccrs.OSGB())
ax = plt.subplot(nx, ny, 4, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.NorthPolarStereo(), alpha=0.5,
linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 5, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 6, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
ax.gridlines(alpha=0.5, linewidth=1.5, linestyle='-')
ax = plt.subplot(nx, ny, 7, projection=ccrs.NorthPolarStereo())
ax.set_global()
ax.coastlines()
osgb = ccrs.OSGB()
ax.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
ax.gridlines(osgb)
ax = plt.subplot(nx, ny, 8,
projection=ccrs.Robinson(central_longitude=135))
ax.set_global()
ax.coastlines()
ax.gridlines(ccrs.PlateCarree(), alpha=0.5, linewidth=1.5, linestyle='-')
delta = 1.5e-2
plt.subplots_adjust(left=0 + delta, right=1 - delta,
top=1 - delta, bottom=0 + delta)
def test_gridliner_specified_lines():
xs = [0, 60, 120, 180, 240, 360]
ys = [-90, -60, -30, 0, 30, 60, 90]
ax = mock.Mock(_gridliners=[], spec=GeoAxes)
gl = GeoAxes.gridlines(ax, xlocs=xs, ylocs=ys)
assert isinstance(gl.xlocator, mticker.FixedLocator)
assert isinstance(gl.ylocator, mticker.FixedLocator)
assert gl.xlocator.tick_values(None, None).tolist() == xs
assert gl.ylocator.tick_values(None, None).tolist() == ys
# The tolerance on this test is particularly high because of the high number
# of text objects. A new testing strategy is needed for this kind of test.
@ImageTesting(['gridliner_labels'
if mpl.__version__ >= '1.5' else
'gridliner_labels_pre_mpl_1.5'])
def test_grid_labels():
plt.figure(figsize=(8, 10))
crs_pc = ccrs.PlateCarree()
crs_merc = ccrs.Mercator()
crs_osgb = ccrs.OSGB()
ax = plt.subplot(3, 2, 1, projection=crs_pc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Check that adding labels to Mercator gridlines gives an error.
# (Currently can only label PlateCarree gridlines.)
ax = plt.subplot(3, 2, 2,
projection=ccrs.PlateCarree(central_longitude=180))
ax.coastlines()
with assert_raises(TypeError):
ax.gridlines(crs=crs_merc, draw_labels=True)
ax.set_title('Known bug')
gl = ax.gridlines(crs=crs_pc, draw_labels=True)
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlines = False
ax = plt.subplot(3, 2, 3, projection=crs_merc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Check that labelling the gridlines on an OSGB plot gives an error.
# (Currently can only draw these on PlateCarree or Mercator plots.)
ax = plt.subplot(3, 2, 4, projection=crs_osgb)
ax.coastlines()
with assert_raises(TypeError):
ax.gridlines(draw_labels=True)
ax = plt.subplot(3, 2, 4, projection=crs_pc)
ax.coastlines()
gl = ax.gridlines(
crs=crs_pc, linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_bottom = True
gl.ylabels_right = True
gl.xlines = False
gl.xlocator = mticker.FixedLocator([-180, -45, 45, 180])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 15, 'color': 'gray'}
gl.xlabel_style = {'color': 'red'}
# trigger a draw at this point and check the appropriate artists are
# populated on the gridliner instance
FigureCanvasAgg(plt.gcf()).draw()
assert len(gl.xlabel_artists) == 4
assert len(gl.ylabel_artists) == 5
assert len(gl.ylabel_artists) == 5
assert len(gl.xline_artists) == 0
ax = plt.subplot(3, 2, 5, projection=crs_pc)
ax.set_extent([-20, 10.0, 45.0, 70.0])
ax.coastlines()
ax.gridlines(draw_labels=True)
ax = plt.subplot(3, 2, 6, projection=crs_merc)
ax.set_extent([-20, 10.0, 45.0, 70.0], crs=crs_pc)
ax.coastlines()
ax.gridlines(draw_labels=True)
# Increase margins between plots to stop them bumping into one another.
plt.subplots_adjust(wspace=0.25, hspace=0.25)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| gpl-3.0 |
chene5/Big-Data-in-Psychology | tutorial_3/3_svm_graph.py | 1 | 2332 | # -*- coding: utf-8 -*-
"""3_svm_graph.py
Example code for SVM classification of irises.
@author: Eric Chen
"""
from sklearn import datasets
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import numpy as np
# Import the Iris dataset.
iris = datasets.load_iris()
# Only use the first two features (sepal length and sepal width).
# These are the training features.
X = iris.data[:, :2]
# These are the labeled outcomes, the species of iris.
# Iris setosa, Iris versicolour, and Iris virginica
y = iris.target
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
lin_svc = LinearSVC(C=C).fit(X, y)
# Set up data to be amenable to plotting.
# Create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Get prediction from SVM.
prediction = lin_svc.predict(np.c_[xx.ravel(), yy.ravel()])
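# Note (illustrative): np.c_[xx.ravel(), yy.ravel()] stacks the flattened grid
# coordinates into an (n_points, 2) array, so the SVM assigns a predicted
# species to every point of the mesh before it is reshaped for plotting below.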
# Title for the plot
title = 'LinearSVC (linear kernel)'
# Labels for the plot
xlabel = 'Sepal length'
ylabel = 'Sepal width'
"""Generate a plot for the classifier prediction."""
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
# Put the result into a color plot
prediction = prediction.reshape(xx.shape)
plt.contourf(xx, yy, prediction, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
for i, point in enumerate(y):
if point == 0:
# setosa
seto = plt.scatter(X[:, 0][i], X[:, 1][i], c='k', s=100, marker='x')
elif point == 1:
# versicolor
vers = plt.scatter(X[:, 0][i], X[:, 1][i], c='b', s=100, marker='o')
else:
# virginica
virg = plt.scatter(X[:, 0][i], X[:, 1][i], c='r', s=100, marker='^')
plt.legend((seto, vers, virg),
('Iris setosa', 'Iris versicolor', 'Iris virginica'),
scatterpoints=1,
loc='lower left',
ncol=3,
fontsize=22)
plt.xlabel(xlabel, fontsize=24)
plt.ylabel(ylabel, fontsize=24)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(title, fontsize=24)
plt.show()
| mit |
ltiao/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of agglomerative
clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
JsNoNo/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
jundongl/scikit-feast | skfeature/example/test_ll_l21.py | 3 | 1731 | import scipy.io
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import accuracy_score
from skfeature.utility.sparse_learning import *
from skfeature.function.sparse_learning_based import ll_l21
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
Y = construct_label_matrix_pan(y)
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the feature weight matrix
Weight, obj, value_gamma = ll_l21.proximal_gradient_descent(X[train], Y[train], 0.1, verbose=False)
# sort the feature scores in an ascending order according to the feature scores
idx = feature_ranking(Weight)
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main() | gpl-2.0 |
rongzhen/FPLAPW-KP | examples/Ag/EV.py | 1 | 1093 | import numpy as nm
import libxml2
from array import *
from libxml2 import xmlAttr
import matplotlib.pyplot as plt
# read data
eVdoc = libxml2.parseFile("./eV.xml")
ctxt = eVdoc.xpathNewContext()
Volume=nm.array(map(float,map(xmlAttr.getContent,ctxt.xpathEval("//@volume"))))
totalEnergy=nm.array(map(float,map(xmlAttr.getContent,ctxt.xpathEval("//@totalEnergy"))))
# make quadratic fit
p=nm.polyfit(Volume,totalEnergy,2)
curve=nm.poly1d(p)
# find root of derivative to get minimum
minv=nm.roots(nm.polyder(p))
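# Note (illustrative): for a quadratic fit p = [a, b, c] the derivative
# 2*a*V + b vanishes at V = -b / (2*a), which is the equilibrium volume
# reported below.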
print 'minimum Volume '+str(minv)
print 'minimum energy at scale '+str(pow(minv/2,1./3.))
# x values for plotting polynomial
xa = nm.linspace(Volume[0],Volume[-1],100)
#plot
plt.figure(1)
plt.title('Ag Volume')
plt.ylabel(r'total energy in $[Hartree]$')
plt.xlabel(r'volume in $[Bohr]^3$')
plt.plot(xa,curve(xa),'-')
plt.plot(Volume,totalEnergy,'o')
plt.annotate('minimum Volume '+str(minv), xy=(minv,curve(minv)), xycoords='data' ,
xytext=(minv-7,curve(minv)+0.002) , arrowprops=dict(arrowstyle="->"))
plt.savefig('EV.png')
print 'plot saved as EV.png'
plt.show()
| lgpl-2.1 |
dsullivan7/scikit-learn | sklearn/cross_validation.py | 3 | 57208 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
    in testing on all distinct subsets of p samples, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one takes the
    remaining samples.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds test sets from all the samples assigned to ``p``
    different label values, while the latter uses the samples that all
    share a single label.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
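# A short usage sketch for cross_val_predict (illustrative only; the iris
# data and LogisticRegression estimator below are assumptions, not part of
# this module):
#
#   from sklearn.datasets import load_iris
#   from sklearn.linear_model import LogisticRegression
#   iris = load_iris()
#   preds = cross_val_predict(LogisticRegression(), iris.data, iris.target,
#                             cv=5)
#   # preds[i] is the prediction for sample i made by a model that never saw
#   # sample i during fitting.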
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
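# A short usage sketch for cross_val_score (illustrative only; the digits
# data and SVC estimator below are assumptions, not part of this module):
#
#   from sklearn.datasets import load_digits
#   from sklearn.svm import SVC
#   digits = load_digits()
#   scores = cross_val_score(SVC(kernel='linear'), digits.data, digits.target,
#                            cv=3)
#   # scores holds one accuracy value per fold; scores.mean() summarises them.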
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier)
def _check_cv(cv, X=None, y=None, classifier=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
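# A brief sketch of how _check_cv resolves its input (illustrative only):
#
#   y = np.array([0, 0, 1, 1])
#   X = np.zeros((4, 2))
#   _check_cv(2, X, y, classifier=True)   # -> StratifiedKFold(y, n_folds=2)
#   _check_cv(2, X, y, classifier=False)  # -> KFold(4, n_folds=2)
#   _check_cv(None, X, y)                 # -> 3-fold KFold by default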
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a problem with nosetests
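# A short usage sketch for permutation_test_score (illustrative only; the
# iris data and SVC estimator below are assumptions, not part of this
# module):
#
#   from sklearn.datasets import load_iris
#   from sklearn.svm import SVC
#   iris = load_iris()
#   score, perm_scores, pvalue = permutation_test_score(
#       SVC(kernel='linear'), iris.data, iris.target,
#       cv=5, n_permutations=100, n_jobs=1)
#   # A small pvalue suggests the true score is unlikely to arise with
#   # shuffled labels, i.e. the classifier exploits a real signal.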
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a oneliner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a problem with nosetests
| bsd-3-clause |
tedunderwood/character | train_models/modelingprocess.py | 4 | 4706 | #!/usr/bin/env python3
# modelingprocess.py
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.preprocessing import StandardScaler
def remove_zerocols(trainingset, testset):
''' Remove all columns that sum to zero in the trainingset.
'''
columnsums = trainingset.sum(axis = 0)
columnstokeep = []
for i in range(len(columnsums)):
if columnsums[i] > 0:
columnstokeep.append(i)
trainingset = trainingset.iloc[ : , columnstokeep]
testset = testset.iloc[columnstokeep]
return trainingset, testset
def sliceframe(dataframe, yvals, excludedrows, testrow):
numrows = len(dataframe)
newyvals = list(yvals)
for i in excludedrows:
del newyvals[i]
    # NB: This only works if we assume that excludedrows
    # has already been sorted in descending order;
    # otherwise indexes will slide around as you delete.
trainingset = dataframe.drop(dataframe.index[excludedrows])
newyvals = np.array(newyvals)
testset = dataframe.iloc[testrow]
# Potential problem arises. What if some of these columns are
# all zero, because the associated word occurs in none of the
# documents still in the training matrix? An error will be
# thrown. To avoid this, we remove columns that sum to zero.
trainingset, testset = remove_zerocols(trainingset, testset)
return trainingset, newyvals, testset
def sliceframe_list(dataframe, yvals, excludedrows):
numrows = len(dataframe)
newyvals = np.array(yvals)
newyvals = np.delete(newyvals, excludedrows)
trainingset = dataframe.drop(dataframe.index[excludedrows])
testset = dataframe.iloc[excludedrows]
# trainingset, testset = remove_zerocols(trainingset, testset)
return trainingset, newyvals, testset
def normalizearray(featurearray, usedate):
'''Normalizes an array by centering on means and
scaling by standard deviations. Also returns the
means and standard deviations for features.
'''
numinstances, numfeatures = featurearray.shape
means = list()
stdevs = list()
lastcolumn = numfeatures - 1
for featureidx in range(numfeatures):
thiscolumn = featurearray.iloc[ : , featureidx]
thismean = np.mean(thiscolumn)
thisstdev = np.std(thiscolumn)
if (not usedate) or featureidx != lastcolumn:
# If we're using date we don't normalize the last column.
means.append(thismean)
stdevs.append(thisstdev)
featurearray.iloc[ : , featureidx] = (thiscolumn - thismean) / thisstdev
else:
print('FLAG')
means.append(thismean)
thisstdev = 0.1
stdevs.append(thisstdev)
featurearray.iloc[ : , featureidx] = (thiscolumn - thismean) / thisstdev
# We set a small stdev for date.
return featurearray, means, stdevs
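# A brief illustrative sketch of normalizearray (the toy frame below is
# hypothetical, not part of the original pipeline):
#
#   df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
#   scaled, means, stdevs = normalizearray(df, usedate=False)
#   # each column of `scaled` now has zero mean and unit standard deviation;
#   # (means, stdevs) can be reused to scale a held-out test row the same way.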
def model_one_volume(data5tuple):
data, classvector, listtoexclude, i, usedate, regularization = data5tuple
trainingset, yvals, testset = sliceframe(data, classvector, listtoexclude, i)
newmodel = LogisticRegression(C = regularization)
trainingset, means, stdevs = normalizearray(trainingset, usedate)
newmodel.fit(trainingset, yvals)
testset = (testset - means) / stdevs
testset = testset.reshape(1, -1)
prediction = newmodel.predict_proba(testset)[0][1]
if i % 50 == 0:
print(i)
# print(str(i) + " - " + str(len(listtoexclude)))
return prediction
def model_volume_list(data5tuple):
data, classvector, idstomodel, indicestomodel, regularization = data5tuple
trainingset, yvals, testset = sliceframe_list(data, classvector, indicestomodel)
newmodel = LogisticRegression(C = regularization)
stdscaler = StandardScaler()
stdscaler.fit(trainingset)
scaledtraining = stdscaler.transform(trainingset)
newmodel.fit(scaledtraining, yvals)
scaledtest = stdscaler.transform(testset)
predictions = [x[1] for x in newmodel.predict_proba(scaledtest)]
return predictions
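# A brief illustrative sketch of how model_volume_list expects its argument
# to be packed (the names below are hypothetical):
#
#   data5tuple = (data, classvector, idstomodel, indicestomodel, 0.001)
#   predictions = model_volume_list(data5tuple)
#   # predictions holds one probability per held-out row in indicestomodel.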
def svm_model(data5tuple):
data, classvector, idstomodel, indicestomodel, regularization = data5tuple
trainingset, yvals, testset = sliceframe_list(data, classvector, indicestomodel)
trainingset, means, stdevs = normalizearray(trainingset, False)
supportvector = svm.SVC(C = regularization, kernel = 'linear', probability = True)
supportvector.fit(trainingset, yvals)
testset = (testset - means) / stdevs
predictions = supportvector.predict(testset)
probabilities = [x[1] for x in supportvector.predict_proba(testset)]
return probabilities
| mit |
gergopokol/renate-od | cherab_demos/deuterium_beam.py | 1 | 3842 |
import numpy as np
import matplotlib.pyplot as plt
from raysect.core import Point3D, Vector3D, translate, rotate_basis
from raysect.optical import World
from raysect.optical.observer import PinholeCamera, SightLine, PowerPipeline0D, SpectralPowerPipeline0D
from cherab.core.math import ConstantVector3D
from cherab.core.atomic import hydrogen, Line
from cherab.tools.plasmas.slab import build_slab_plasma
from renate.cherab_models import RenateBeamEmissionLine, RenateBeam
world = World()
# PLASMA ----------------------------------------------------------------------
plasma = build_slab_plasma(peak_density=5e19, world=world)
plasma.b_field = ConstantVector3D(Vector3D(0, 0.6, 0))
# BEAM SETUP ------------------------------------------------------------------
integration_step = 0.0025
beam_transform = translate(-0.5, 0.0, 0) * rotate_basis(Vector3D(1, 0, 0), Vector3D(0, 0, 1))
line = Line(hydrogen, 0, (3, 2))
beam = RenateBeam(parent=world, transform=beam_transform)
beam.plasma = plasma
beam.energy = 100000
beam.power = 3e6
beam.element = hydrogen
beam.temperature = 30
beam.sigma = 0.05
beam.divergence_x = 0.
beam.divergence_y = 0.
beam.length = 3.0
beam.models = [RenateBeamEmissionLine(line)]
beam.integrator.step = integration_step
beam.integrator.min_samples = 10
beam2 = RenateBeam(parent=world, transform=beam_transform)
beam2.plasma = plasma
beam2.energy = 60000
beam2.power = 3e6
beam2.element = hydrogen
beam2.temperature = 30
beam2.sigma = 0.05
beam2.divergence_x = 0.5
beam2.divergence_y = 0.5
beam2.length = 3.0
beam2.models = [RenateBeamEmissionLine(line)]
beam2.integrator.step = integration_step
beam2.integrator.min_samples = 10
# line of sight settings
los_start = Point3D(1.5, -1, 0)
los_target = Point3D(0.5, 0, 0)
los_direction = los_start.vector_to(los_target).normalise()
beam_density = np.empty((200, 200))
beam_density2 = np.empty((200, 200))
xpts = np.linspace(-1, 2, 200)
ypts = np.linspace(-1, 1, 200)
for i, xpt in enumerate(xpts):
for j, ypt in enumerate(ypts):
pt = Point3D(xpt, ypt, 0).transform(beam.to_local())
beam_density[i, j] = beam.density(pt.x, pt.y, pt.z)
beam_density2[i, j] = beam2.density(pt.x, pt.y, pt.z)
plt.figure()
plt.imshow(np.transpose(np.squeeze(beam_density)), extent=[-1, 2, -1, 1], origin='lower')
plt.plot([los_start.x, los_target.x], [los_start.y, los_target.y], 'k')
plt.colorbar()
plt.axis('equal')
plt.xlabel('x axis (beam coords)')
plt.ylabel('z axis (beam coords)')
plt.title("Beam full energy density profile in r-z plane")
z = np.linspace(0, 3, 200)
beam_full_densities = [beam.density(0, 0, zz) for zz in z]
beam_half_densities = [beam2.density(0, 0, zz) for zz in z]
plt.figure()
plt.plot(z, beam_full_densities, label="full energy")
plt.plot(z, beam_half_densities, label="half energy")
plt.xlabel('z axis (beam coords)')
plt.ylabel('beam component density [m^-3]')
plt.title("Beam attenuation by energy component")
plt.legend()
# OBSERVATIONS ----------------------------------------------------------------
camera = PinholeCamera((128, 128), parent=world, transform=translate(1.25, -3.5, 0) * rotate_basis(Vector3D(0, 1, 0), Vector3D(0, 0, 1)))
camera.spectral_rays = 1
camera.spectral_bins = 15
camera.pixel_samples = 5
# turning off parallelisation because it causes issues with the way RENATE currently loads atomic data
from raysect.core.workflow import SerialEngine
camera.render_engine = SerialEngine()
plt.ion()
camera.observe()
power = PowerPipeline0D(accumulate=False)
spectral_power = SpectralPowerPipeline0D()
los = SightLine(pipelines=[power, spectral_power], min_wavelength=640, max_wavelength=665,
parent=world, transform=translate(*los_start) * rotate_basis(los_direction, Vector3D(0, 0, 1)))
los.pixel_samples = 1
los.spectral_bins = 2000
los.observe()
plt.ioff()
plt.show()
| lgpl-3.0 |
petosegan/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
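# The essential out-of-core pattern used in this example, in miniature
# (illustrative only; `text_batches` and `label_batches` are hypothetical
# stand-ins for the Reuters minibatch stream built further down):
#
#   vec = HashingVectorizer(n_features=2 ** 18, non_negative=True)
#   clf = SGDClassifier()
#   for text_batch, y_batch in zip(text_batches, label_batches):
#       X_batch = vec.transform(text_batch)   # stateless, fixed-width output
#       clf.partial_fit(X_batch, y_batch, classes=[0, 1])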
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the iterator that yields mini-batches of documents parsed from the
# Reuters SGML stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 5 | 9922 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
    is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_labels = len(le.classes_)
n_samples = X.shape[0]
check_number_of_labels(n_labels, n_samples)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
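# Hedged usage sketch (the KMeans call and toy data are illustrative
# assumptions, not part of this module):
#     import numpy as np
#     from sklearn.cluster import KMeans
#     from sklearn.metrics import silhouette_score
#     X = np.random.RandomState(0).rand(50, 2)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     silhouette_score(X, labels)  # float in [-1, 1]; higher means denser,
#                                  # better separated clusters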
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
    is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
le = LabelEncoder()
labels = le.fit_transform(labels)
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.ones(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf * intra_clust_dists
for curr_label in unique_labels:
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = np.sum(mask) - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in unique_labels:
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
return sil_samples
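# Worked example of the per-sample formula above (illustrative numbers only):
# if a sample has mean intra-cluster distance a = 0.2 and mean distance to the
# nearest other cluster b = 0.5, its coefficient is
# (b - a) / max(a, b) = (0.5 - 0.2) / 0.5 = 0.6.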
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
    score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
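# Hedged usage sketch (toy data and the KMeans call are illustrative
# assumptions, not part of this module):
#     import numpy as np
#     from sklearn.cluster import KMeans
#     X = np.random.RandomState(0).rand(60, 3)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     calinski_harabaz_score(X, labels)  # larger ratio of between- to
#                                        # within-cluster dispersion is better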
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
    corr(Xk u, Yk v), subject to ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/legend_handler.py | 11 | 21027 | """
This module defines default legend handlers.
It is strongly encouraged to read the :ref:`legend guide
<plotting-guide-legend>` before this documentation.
Legend handlers are expected to be callable objects with the following
signature. ::
legend_handler(legend, orig_handle, fontsize, handlebox)
Where *legend* is the legend itself, *orig_handle* is the original
plot, *fontsize* is the fontsize in pixels, and *handlebox* is an
OffsetBox instance. Within the call, you should create relevant
artists (using relevant properties from the *legend* and/or
*orig_handle*) and add them into the handlebox. The artists need to
be scaled according to the fontsize (note that the size is in pixels,
i.e., this is a dpi-scaled value).
This module includes definition of several legend handler classes
derived from the base class (HandlerBase) with the following method.
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import numpy as np
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
import matplotlib.collections as mcoll
def update_from_first_child(tgt, src):
tgt.update_from(src.get_children()[0])
class HandlerBase(object):
"""
A Base class for default legend handlers.
The derived classes are meant to override *create_artists* method, which
has a following signature.::
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
    The overridden method needs to create artists for the given
    transform that fit in the given dimensions (xdescent, ydescent,
    width, height) and that are scaled by fontsize if necessary.
"""
def __init__(self, xpad=0., ypad=0., update_func=None):
self._xpad, self._ypad = xpad, ypad
self._update_prop_func = update_func
def _update_prop(self, legend_handle, orig_handle):
if self._update_prop_func is None:
self._default_update_prop(legend_handle, orig_handle)
else:
self._update_prop_func(legend_handle, orig_handle)
def _default_update_prop(self, legend_handle, orig_handle):
legend_handle.update_from(orig_handle)
def update_prop(self, legend_handle, orig_handle, legend):
self._update_prop(legend_handle, orig_handle)
legend._set_artist_props(legend_handle)
legend_handle.set_clip_box(None)
legend_handle.set_clip_path(None)
def adjust_drawing_area(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
):
xdescent = xdescent - self._xpad * fontsize
ydescent = ydescent - self._ypad * fontsize
width = width - self._xpad * fontsize
height = height - self._ypad * fontsize
return xdescent, ydescent, width, height
def legend_artist(self, legend, orig_handle,
fontsize, handlebox):
"""
Return the artist that this HandlerBase generates for the given
original artist/handle.
Parameters
----------
legend : :class:`matplotlib.legend.Legend` instance
The legend for which these legend artists are being created.
orig_handle : :class:`matplotlib.artist.Artist` or similar
The object for which these legend artists are being created.
fontsize : float or int
The fontsize in pixels. The artists being created should
be scaled according to the given fontsize.
handlebox : :class:`matplotlib.offsetbox.OffsetBox` instance
The box which has been created to hold this legend entry's
artists. Artists created in the `legend_artist` method must
be added to this handlebox inside this method.
"""
xdescent, ydescent, width, height = self.adjust_drawing_area(
legend, orig_handle,
handlebox.xdescent, handlebox.ydescent,
handlebox.width, handlebox.height,
fontsize)
artists = self.create_artists(legend, orig_handle,
xdescent, ydescent, width, height,
fontsize, handlebox.get_transform())
# create_artists will return a list of artists.
for a in artists:
handlebox.add_artist(a)
# we only return the first artist
return artists[0]
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
raise NotImplementedError('Derived must override')
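# Hedged sketch of the subclassing pattern described in the HandlerBase
# docstring (the gray rectangle key is an illustrative assumption, not part of
# this module):
#     class HandlerGrayBox(HandlerBase):
#         def create_artists(self, legend, orig_handle,
#                            xdescent, ydescent, width, height, fontsize,
#                            trans):
#             patch = Rectangle((-xdescent, -ydescent), width, height,
#                               facecolor='0.7', edgecolor='k', transform=trans)
#             return [patch]
#     # used via: ax.legend(handler_map={some_handle: HandlerGrayBox()})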
class HandlerNpoints(HandlerBase):
def __init__(self, marker_pad=0.3, numpoints=None, **kw):
HandlerBase.__init__(self, **kw)
self._numpoints = numpoints
self._marker_pad = marker_pad
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.numpoints
else:
return self._numpoints
def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):
numpoints = self.get_numpoints(legend)
if numpoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(-xdescent + self._marker_pad * fontsize,
width - self._marker_pad * fontsize,
numpoints)
xdata_marker = xdata
elif numpoints == 1:
xdata = np.linspace(-xdescent, width, 2)
xdata_marker = [0.5 * width - 0.5 * xdescent]
return xdata, xdata_marker
class HandlerNpointsYoffsets(HandlerNpoints):
def __init__(self, numpoints=None, yoffsets=None, **kw):
HandlerNpoints.__init__(self, numpoints=numpoints, **kw)
self._yoffsets = yoffsets
def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
if self._yoffsets is None:
ydata = height * legend._scatteryoffsets
else:
ydata = height * np.asarray(self._yoffsets)
return ydata
class HandlerLine2D(HandlerNpoints):
"""
Handler for Line2D instances.
"""
def __init__(self, marker_pad=0.3, numpoints=None, **kw):
HandlerNpoints.__init__(self, marker_pad=marker_pad, numpoints=numpoints, **kw)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self.update_prop(legline, orig_handle, legend)
legline.set_drawstyle('default')
legline.set_marker("")
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
self.update_prop(legline_marker, orig_handle, legend)
legline_marker.set_linestyle('None')
if legend.markerscale != 1:
newsz = legline_marker.get_markersize() * legend.markerscale
legline_marker.set_markersize(newsz)
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
# correspondence.
legline._legmarker = legline_marker
legline.set_transform(trans)
legline_marker.set_transform(trans)
return [legline, legline_marker]
class HandlerPatch(HandlerBase):
"""
Handler for Patch instances.
"""
def __init__(self, patch_func=None, **kw):
"""
The HandlerPatch class optionally takes a function ``patch_func``
        whose responsibility is to create the legend key artist. The
``patch_func`` should have the signature::
def patch_func(legend=legend, orig_handle=orig_handle,
xdescent=xdescent, ydescent=ydescent,
width=width, height=height, fontsize=fontsize)
Subsequently the created artist will have its ``update_prop`` method
called and the appropriate transform will be applied.
"""
HandlerBase.__init__(self, **kw)
self._patch_func = patch_func
def _create_patch(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize):
if self._patch_func is None:
p = Rectangle(xy=(-xdescent, -ydescent),
width=width, height=height)
else:
p = self._patch_func(legend=legend, orig_handle=orig_handle,
xdescent=xdescent, ydescent=ydescent,
width=width, height=height, fontsize=fontsize)
return p
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
p = self._create_patch(legend, orig_handle,
xdescent, ydescent, width, height, fontsize)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
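# Hedged example of the ``patch_func`` hook documented above (the ellipse key
# and the Circle handle are illustrative assumptions, not part of this module):
#     import matplotlib.pyplot as plt
#     import matplotlib.patches as mpatches
#     def make_ellipse(legend, orig_handle, xdescent, ydescent,
#                      width, height, fontsize):
#         return mpatches.Ellipse(xy=(0.5 * width - 0.5 * xdescent,
#                                     0.5 * height - 0.5 * ydescent),
#                                 width=width + xdescent,
#                                 height=height + ydescent)
#     c = mpatches.Circle((0.5, 0.5), 0.25, facecolor="green", edgecolor="red")
#     plt.gca().add_patch(c)
#     plt.legend([c], ["an ellipse-shaped legend key"],
#                handler_map={mpatches.Circle: HandlerPatch(patch_func=make_ellipse)})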
class HandlerLineCollection(HandlerLine2D):
"""
Handler for LineCollection instances.
"""
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.scatterpoints
else:
return self._numpoints
def _default_update_prop(self, legend_handle, orig_handle):
lw = orig_handle.get_linewidth()[0]
dashes = orig_handle.get_dashes()[0]
color = orig_handle.get_colors()[0]
legend_handle.set_color(color)
legend_handle.set_linewidth(lw)
if dashes[0] is not None: # dashed line
legend_handle.set_dashes(dashes[1])
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self.update_prop(legline, orig_handle, legend)
legline.set_transform(trans)
return [legline]
class HandlerRegularPolyCollection(HandlerNpointsYoffsets):
"""
Handler for RegularPolyCollections.
"""
def __init__(self, yoffsets=None, sizes=None, **kw):
HandlerNpointsYoffsets.__init__(self, yoffsets=yoffsets, **kw)
self._sizes = sizes
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.scatterpoints
else:
return self._numpoints
def get_sizes(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize):
if self._sizes is None:
size_max = max(orig_handle.get_sizes()) * legend.markerscale ** 2
size_min = min(orig_handle.get_sizes()) * legend.markerscale ** 2
numpoints = self.get_numpoints(legend)
if numpoints < 4:
sizes = [.5 * (size_max + size_min), size_max,
size_min]
else:
rng = (size_max - size_min)
sizes = rng * np.linspace(0, 1, numpoints) + size_min
else:
sizes = self._sizes
return sizes
def update_prop(self, legend_handle, orig_handle, legend):
self._update_prop(legend_handle, orig_handle)
legend_handle.set_figure(legend.figure)
#legend._set_artist_props(legend_handle)
legend_handle.set_clip_box(None)
legend_handle.set_clip_path(None)
def create_collection(self, orig_handle, sizes, offsets, transOffset):
p = type(orig_handle)(orig_handle.get_numsides(),
rotation=orig_handle.get_rotation(),
sizes=sizes,
offsets=offsets,
transOffset=transOffset,
)
return p
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = self.get_ydata(legend, xdescent, ydescent,
width, height, fontsize)
sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,
width, height, fontsize)
p = self.create_collection(orig_handle, sizes,
offsets=list(zip(xdata_marker, ydata)),
transOffset=trans)
self.update_prop(p, orig_handle, legend)
p._transOffset = trans
return [p]
class HandlerPathCollection(HandlerRegularPolyCollection):
"""
Handler for PathCollections, which are used by scatter
"""
def create_collection(self, orig_handle, sizes, offsets, transOffset):
p = type(orig_handle)([orig_handle.get_paths()[0]],
sizes=sizes,
offsets=offsets,
transOffset=transOffset,
)
return p
class HandlerCircleCollection(HandlerRegularPolyCollection):
"""
Handler for CircleCollections
"""
def create_collection(self, orig_handle, sizes, offsets, transOffset):
p = type(orig_handle)(sizes,
offsets=offsets,
transOffset=transOffset,
)
return p
class HandlerErrorbar(HandlerLine2D):
"""
Handler for Errorbars
"""
def __init__(self, xerr_size=0.5, yerr_size=None,
marker_pad=0.3, numpoints=None, **kw):
self._xerr_size = xerr_size
self._yerr_size = yerr_size
HandlerLine2D.__init__(self, marker_pad=marker_pad, numpoints=numpoints,
**kw)
def get_err_size(self, legend, xdescent, ydescent, width, height, fontsize):
xerr_size = self._xerr_size * fontsize
if self._yerr_size is None:
yerr_size = xerr_size
else:
yerr_size = self._yerr_size * fontsize
return xerr_size, yerr_size
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
plotlines, caplines, barlinecols = orig_handle
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
xdata_marker = np.asarray(xdata_marker)
ydata_marker = np.asarray(ydata[:len(xdata_marker)])
xerr_size, yerr_size = self.get_err_size(legend, xdescent, ydescent,
width, height, fontsize)
legline_marker = Line2D(xdata_marker, ydata_marker)
# when plotlines are None (only errorbars are drawn), we just
# make legline invisible.
if plotlines is None:
legline.set_visible(False)
legline_marker.set_visible(False)
else:
self.update_prop(legline, plotlines, legend)
legline.set_drawstyle('default')
legline.set_marker('None')
self.update_prop(legline_marker, plotlines, legend)
legline_marker.set_linestyle('None')
if legend.markerscale != 1:
newsz = legline_marker.get_markersize() * legend.markerscale
legline_marker.set_markersize(newsz)
handle_barlinecols = []
handle_caplines = []
if orig_handle.has_xerr:
verts = [ ((x - xerr_size, y), (x + xerr_size, y))
for x, y in zip(xdata_marker, ydata_marker)]
coll = mcoll.LineCollection(verts)
self.update_prop(coll, barlinecols[0], legend)
handle_barlinecols.append(coll)
if caplines:
capline_left = Line2D(xdata_marker - xerr_size, ydata_marker)
capline_right = Line2D(xdata_marker + xerr_size, ydata_marker)
self.update_prop(capline_left, caplines[0], legend)
self.update_prop(capline_right, caplines[0], legend)
capline_left.set_marker("|")
capline_right.set_marker("|")
handle_caplines.append(capline_left)
handle_caplines.append(capline_right)
if orig_handle.has_yerr:
verts = [ ((x, y - yerr_size), (x, y + yerr_size))
for x, y in zip(xdata_marker, ydata_marker)]
coll = mcoll.LineCollection(verts)
self.update_prop(coll, barlinecols[0], legend)
handle_barlinecols.append(coll)
if caplines:
capline_left = Line2D(xdata_marker, ydata_marker - yerr_size)
capline_right = Line2D(xdata_marker, ydata_marker + yerr_size)
self.update_prop(capline_left, caplines[0], legend)
self.update_prop(capline_right, caplines[0], legend)
capline_left.set_marker("_")
capline_right.set_marker("_")
handle_caplines.append(capline_left)
handle_caplines.append(capline_right)
artists = []
artists.extend(handle_barlinecols)
artists.extend(handle_caplines)
artists.append(legline)
artists.append(legline_marker)
for artist in artists:
artist.set_transform(trans)
return artists
class HandlerStem(HandlerNpointsYoffsets):
"""
Handler for Errorbars
"""
def __init__(self, marker_pad=0.3, numpoints=None,
bottom=None, yoffsets=None, **kw):
HandlerNpointsYoffsets.__init__(self, marker_pad=marker_pad,
numpoints=numpoints,
yoffsets=yoffsets,
**kw)
self._bottom = bottom
def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
if self._yoffsets is None:
ydata = height * (0.5 * legend._scatteryoffsets + 0.5)
else:
ydata = height * np.asarray(self._yoffsets)
return ydata
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
markerline, stemlines, baseline = orig_handle
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = self.get_ydata(legend, xdescent, ydescent,
width, height, fontsize)
if self._bottom is None:
bottom = 0.
else:
bottom = self._bottom
leg_markerline = Line2D(xdata_marker, ydata[:len(xdata_marker)])
self.update_prop(leg_markerline, markerline, legend)
leg_stemlines = []
for thisx, thisy in zip(xdata_marker, ydata):
l = Line2D([thisx, thisx], [bottom, thisy])
leg_stemlines.append(l)
for lm, m in zip(leg_stemlines, stemlines):
self.update_prop(lm, m, legend)
leg_baseline = Line2D([np.amin(xdata), np.amax(xdata)],
[bottom, bottom])
self.update_prop(leg_baseline, baseline, legend)
artists = [leg_markerline]
artists.extend(leg_stemlines)
artists.append(leg_baseline)
for artist in artists:
artist.set_transform(trans)
return artists
class HandlerTuple(HandlerBase):
"""
Handler for Tuple
"""
def __init__(self, **kwargs):
HandlerBase.__init__(self, **kwargs)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
handler_map = legend.get_legend_handler_map()
a_list = []
for handle1 in orig_handle:
handler = legend.get_legend_handler(handler_map, handle1)
_a_list = handler.create_artists(legend, handle1,
xdescent, ydescent, width, height,
fontsize,
trans)
a_list.extend(_a_list)
return a_list
| mit |
CharlesGulian/Deconv | linreg_test.py | 1 | 3817 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 29 19:03:41 2016
@author: charlesgulian
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress as linreg
def analyze(x,y,output_file):
# =======================================================================
# Splitting up data into N different brightness regimes
    N = 5  # Number of brightness regimes (each corresponding to adjacent
           # partitions of width b*std(flux1) of the flux1 distribution; the
           # final partition is all points greater than (N-1)*std(flux1))
'''b = 0.05 # Controls width of partitions
regimes = {}
for i in range(N-1):
regimes[i+1] = []
inds1 = np.where(flux1 <= (i+1)*b*np.std(flux1))[0]
inds2 = np.where(flux1 > (i)*b*np.std(flux1))[0]
inds = np.intersect1d(inds1,inds2)
regimes[i+1].extend(inds)
regimes[N] = []
inds = np.where(flux1 > (N-1)*b*np.std(flux1))[0]
regimes[N].extend(inds)
#'''
inds_unsorted = np.arange(len(x))
x_sorted,inds_sorted = zip(*sorted(zip(x,inds_unsorted)))
L = len(x)
bin_size = L/N
regimes = {}
regimes[0] = []
inds = np.arange(len(x))
    regimes[0].extend(inds)  # 0th regime corresponds to the original (full) dataset
for i in range(N-1):
regimes[i+1] = []
inds = np.array(inds_sorted[i*bin_size:(i+1)*bin_size-1])
regimes[i+1].extend(inds)
regimes[N] = []
inds = np.array(inds_sorted[(N-1)*bin_size::])
regimes[N].extend(inds)
# =======================================================================
# Testing null hypothesis that data has slope m = 1.0
residual = y - x
y = residual
# =======================================================================
# Writing output file
with open(output_file,'w') as g:
g.write('# Linear regression analysis results\n')
g.write('# Columns: Slope | Intercept | r-value | p-value \n')
g.write('\n')
for i in range(N+1):
# =======================================================================
# Slice ith brightness regime of data
xr,yr = x[regimes[i]],y[regimes[i]]
# =======================================================================
# Do linear regression analysis with data in x,y
slope, intercept, r_value, p_value, std_err = linreg(xr,yr)
# =======================================================================
# Write data to output file
xl,xh = xr[0],xr[len(xr)-1] # Bounds of brightness regime
g.write('# ({0}) Results for data in x-axis interval {1}:{2} (n={3})\n'.format(i+1,xl,xh,len(regimes[i])))
g.write('{0} {1} {2} {3}\n'.format(slope,intercept,r_value,p_value))
if p_value < 0.05:
g.write('# Statistically significant deviation from slope of 1.0\n')
g.write('\n')
'''
noise1 = (0.1*(np.random.rand(400)-0.5))
noise2 = 0.2*np.random.randn(400)
artifact1 = np.zeros(400)
artifact1[0:56] = -0.6*np.linspace(0.0,1.0,56)+0.9
for i in range(5):
curr_dir = os.getcwd()
output_file = os.path.join(curr_dir,'Results','LinRegAnalysis','test{0}.txt'.format(i+1))
# Artificial data
m = 1.0 + 0.025*i # Slope of data
x = np.linspace(4.0,16.0,400) + noise1
y = m*x + noise2 + 0.5
xsort,ysort = zip(*sorted(zip(x,y)))
x,y = np.array(xsort),np.array(ysort)
y += artifact1
SHOW = False
if SHOW:
plt.plot(np.linspace(0.0,18,400),np.linspace(0.0,18,400),'b--')
plt.scatter(x,y,c='m',linewidth=0.1)
plt.axis([0.0,19.0,0.0,19.0])
plt.show()
analyze(x,y,output_file)
#''' | gpl-3.0 |
OpenSourcePolicyCenter/dynamic | cs-config/cs_config/helpers.py | 1 | 2175 | """
Functions used to help OG-USA configure to COMP
"""
try:
import boto3
except ImportError:
boto3 = None
import gzip
import pandas as pd
from taxcalc import Policy
from collections import defaultdict
TC_LAST_YEAR = Policy.LAST_BUDGET_YEAR
POLICY_SCHEMA = {
"labels": {
"year": {
"type": "int",
"validators": {
"choice": {
"choices": [
yr for yr in range(2013, TC_LAST_YEAR + 1)
]
}
}
},
"MARS": {
"type": "str",
"validators": {"choice": {"choices": ["single", "mjoint",
"mseparate", "headhh",
"widow"]}}
},
"idedtype": {
"type": "str",
"validators": {"choice": {"choices": ["med", "sltx", "retx", "cas",
"misc", "int", "char"]}}
},
"EIC": {
"type": "str",
"validators": {"choice": {"choices": ["0kids", "1kid",
"2kids", "3+kids"]}}
},
"data_source": {
"type": "str",
"validators": {"choice": {"choices": ["PUF", "CPS", "other"]}}
}
},
"additional_members": {
"section_1": {"type": "str"},
"section_2": {"type": "str"},
"start_year": {"type": "int"},
"checkbox": {"type": "bool"}
}
}
def retrieve_puf(aws_access_key_id, aws_secret_access_key):
"""
Function for retrieving the PUF from the OSPC S3 bucket
"""
has_credentials = aws_access_key_id and aws_secret_access_key
if has_credentials and boto3 is not None:
client = boto3.client(
"s3",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
obj = client.get_object(Bucket="ospc-data-files", Key="puf.csv.gz")
gz = gzip.GzipFile(fileobj=obj["Body"])
puf_df = pd.read_csv(gz)
return puf_df
else:
return None
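# Hedged usage sketch (the key strings below are placeholders, not real
# credentials; this call is an assumption for illustration only):
#     puf_df = retrieve_puf("YOUR_AWS_ACCESS_KEY_ID", "YOUR_AWS_SECRET_ACCESS_KEY")
#     if puf_df is not None:
#         print(puf_df.shape)  # the PUF loaded as a pandas DataFrame
#     else:
#         print("boto3 missing or credentials not supplied; no PUF available")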
| mit |
rrohan/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
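# Hedged usage sketch (the toy matrix below is an illustrative assumption):
#     V = np.abs(np.random.RandomState(0).randn(20, 10))
#     W, H = alt_nnmf(V, r=3, tol=1e-3)
#     W.shape, H.shape          # -> ((20, 3), (3, 10))
#     norm(V - np.dot(W, H))    # Frobenius reconstruction error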
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
'benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
xiaohan2012/temporal-topic-mining | lda_test.py | 1 | 2376 | import lda
import itertools
import numpy as np
import codecs
from temporal import doc_topic_strengths_over_periods
from sklearn.feature_extraction.text import CountVectorizer
from mynlp.preprocess import (transform, ALL_PIPELINE_NAMES)
from util import load_line_corpus
def main():
# parameters
collection_name = "nips"
    years = xrange(2008, 2015)  # 2008 ~ 2014
n_topics = 6
n_top_words = 15
# load corpus
corpus_paths = map(lambda y:
"data/{}-{}.dat".format(collection_name, y),
years)
all_corpus = []
year2corpus = {}
for year, path in zip(years, corpus_paths):
corpus = list(load_line_corpus(path))
all_corpus.append(corpus)
year2corpus[year] = corpus
all_corpus = list(itertools.chain.from_iterable(all_corpus))
preprocessor = lambda doc: ' '.join(transform(doc, ALL_PIPELINE_NAMES))
tokenizer = lambda doc: doc.split()
    with codecs.open('data/lemur-stopwords.txt',
                     'r', 'utf8') as f:
stop_words = map(lambda s: s.strip(), f.readlines())
vectorizer = CountVectorizer(preprocessor=preprocessor,
tokenizer=tokenizer,
stop_words=stop_words,
min_df=5)
X = vectorizer.fit_transform(all_corpus)
id2word = {id_: word
for word, id_ in vectorizer.vocabulary_.items()}
# build the model
model = lda.LDA(n_topics=n_topics, n_iter=700,
# alpha=1.0, eta=1.0,
random_state=1)
model.fit(X)
# print topics
for i, topic_dist in enumerate(model.topic_word_):
top_word_ids = np.argsort(topic_dist)[:-n_top_words:-1]
topic_words = [id2word[id_] for id_ in top_word_ids]
print('Topic {}: {}'.format(i, ' '.join(topic_words)))
year2docs = {}
start_document_index = 0
for year in years:
corpus_size = len(year2corpus[year])
end_document_index = start_document_index + corpus_size
year2docs[year] = np.arange(start_document_index, end_document_index)
start_document_index = end_document_index
tbl = doc_topic_strengths_over_periods(model.doc_topic_, year2docs)
    print(tbl)
    print(np.array(tbl.values()))
if __name__ == "__main__":
main()
| mit |
jreback/pandas | pandas/tests/io/parser/test_common.py | 1 | 67821 | """
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from inspect import signature
from io import BytesIO, StringIO
import os
import platform
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.compat import is_platform_linux
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat, option_context
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
with parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
) as reader:
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
{},
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
{"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
{},
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
{"true_values": ["foo"], "false_values": ["bar"]},
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
pass
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0, "nrows": 5}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0, "nrows": 5}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
with parser.read_csv(StringIO(data), chunksize=2) as reader:
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = {"index_col": 0}
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
with TextParser(data_list, chunksize=2, **kwargs) as parser:
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
with parser.read_csv(StringIO(data), iterator=True) as reader:
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
with parser.read_csv(StringIO(data), chunksize=1) as reader:
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for iteration"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
pass
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
{"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
{"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = {"sep": "\t"}
url = (
"https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = {"sep": "\t"}
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = f"{tm.rands(10)}.csv"
msg = r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
@td.skip_if_windows # os.chmod does not work in windows
def test_no_permission(all_parsers):
# GH 23784
parser = all_parsers
msg = r"\[Errno 13\]"
with tm.ensure_clean() as path:
os.chmod(path, 0) # make file unreadable
# verify that this process cannot open the file (not running as sudo)
try:
with open(path):
pass
pytest.skip("Running as sudo.")
except PermissionError:
pass
with pytest.raises(PermissionError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH38630, sometimes gives ResourceWarning", strict=False)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
assert df.a.dtype == object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(all_parsers_all_precisions):
# see gh-12215
df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
data = df.to_csv(index=False)
parser, precision = all_parsers_all_precisions
df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
tm.assert_frame_equal(df_roundtrip, df)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
"00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166",
],
columns=["ID"],
)
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_int64_uint64_range(all_parsers, val):
# These numbers fall right inside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
)
def test_outside_int64_uint64_range(all_parsers, val):
# These numbers fall just outside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("neg_exp", [-617, -100000, -99999999999999999])
def test_very_negative_exponent(all_parsers_all_precisions, neg_exp):
# GH#38753
parser, precision = all_parsers_all_precisions
data = f"data\n10E{neg_exp}"
result = parser.read_csv(StringIO(data), float_precision=precision)
expected = DataFrame({"data": [0.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999])
def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):
# GH#38753
parser, precision = all_parsers_all_precisions
data = f"data\n10E{exp}"
result = parser.read_csv(StringIO(data), float_precision=precision)
if precision == "round_trip":
if exp == 999999999999999999 and is_platform_linux():
mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
request.node.add_marker(mark)
value = np.inf if exp > 0 else 0.0
expected = DataFrame({"data": [value]})
else:
expected = DataFrame({"data": [f"10E{exp}"]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
with parser.read_csv(data, chunksize=nrows) as reader:
result = next(iter(reader))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected,msg",
[
# gh-10728: WHITESPACE_LINE
(
"a,b,c\n4,5,6\n ",
{},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# gh-10548: EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
{"comment": "#"},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL_NOP
(
"a,b,c\n4,5,6\n\r",
{},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_COMMENT
(
"a,b,c\n4,5,6#comment",
{"comment": "#"},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# SKIP_LINE
(
"a,b,c\n4,5,6\nskipme",
{"skiprows": [2]},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
{"comment": "#", "skip_blank_lines": False},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# IN_FIELD
(
"a,b,c\n4,5,6\n ",
{"skip_blank_lines": False},
DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL
(
"a,b,c\n4,5,6\n\r",
{"skip_blank_lines": False},
DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
None,
),
# ESCAPED_CHAR
(
"a,b,c\n4,5,6\n\\",
{"escapechar": "\\"},
None,
"(EOF following escape character)|(unexpected end of data)",
),
# ESCAPE_IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"\\',
{"escapechar": "\\"},
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
# IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"',
{"escapechar": "\\"},
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
],
ids=[
"whitespace-line",
"eat-line-comment",
"eat-crnl-nop",
"eat-comment",
"skip-line",
"eat-line-comment",
"in-field",
"eat-crnl",
"escaped-char",
"escape-in-quoted-field",
"in-quoted-field",
],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check to see that the response of parser when faced with no
# provided columns raises the correct error, with or without usecols.
("", {}, None),
("", {"usecols": ["X"]}, None),
(
",,",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
{
"header": None,
"delim_whitespace": True,
"skiprows": [0, 1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
{
"delim_whitespace": True,
"skiprows": [1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_verbose_read(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
parser.read_csv(StringIO(data), verbose=True)
captured = capsys.readouterr()
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 3 NA values in column a\n"
def test_verbose_read2(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
parser.read_csv(StringIO(data), verbose=True, index_col=0)
captured = capsys.readouterr()
# Engines are verbose in different ways.
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 1 NA values in column a\n"
def test_iteration_open_handle(all_parsers):
parser = all_parsers
kwargs = {"squeeze": True, "header": None}
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path) as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,thousands,decimal",
[
(
"""A|B|C
1|2,334.01|5
10|13|10.
""",
",",
".",
),
(
"""A|B|C
1|2.334,01|5
10|13|10,
""",
".",
",",
),
],
)
def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
parser = all_parsers
expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
result = parser.read_csv(
StringIO(data), sep="|", thousands=thousands, decimal=decimal
)
tm.assert_frame_equal(result, expected)
def test_euro_decimal_format(all_parsers):
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
result = parser.read_csv(StringIO(data), sep=";", decimal=",")
expected = DataFrame(
[
[1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
[2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
[3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
],
columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_inf_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
expected = DataFrame(
{"A": [float("inf"), float("-inf")] * 5},
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_infinity_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,Infinity
b,-Infinity
c,+Infinity
"""
expected = DataFrame(
{"A": [float("infinity"), float("-infinity"), float("+infinity")]},
index=["a", "b", "c"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
data = "\n" * nrows
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data))
@td.check_file_leaks
def test_memory_map(all_parsers, csv_dir_path):
mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
parser = all_parsers
expected = DataFrame(
{"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
)
result = parser.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(result, expected)
def test_null_byte_char(all_parsers):
# see gh-2741
data = "\x00,foo"
names = ["a", "b"]
parser = all_parsers
if parser.engine == "c":
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
def test_temporary_file(all_parsers):
# see gh-13398
parser = all_parsers
data = "0 0"
with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
new_file.write(data)
new_file.flush()
new_file.seek(0)
result = parser.read_csv(new_file, sep=r"\s+", header=None)
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
# see gh-5500
parser = all_parsers
data = "a,b\n1\x1a,2"
expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte_to_file(all_parsers):
# see gh-16559
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
path = f"__{tm.rands(10)}__.csv"
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
f.write(data)
result = parser.read_csv(path)
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
def test_file_handle_string_io(all_parsers):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
data = "a,b\n1,2"
fh = StringIO(data)
parser.read_csv(fh)
assert not fh.closed
def test_file_handles_with_open(all_parsers, csv1):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
for mode in ["r", "rb"]:
with open(csv1, mode) as f:
parser.read_csv(f)
assert not f.closed
def test_invalid_file_buffer_class(all_parsers):
# see gh-15337
class InvalidBuffer:
pass
parser = all_parsers
msg = "Invalid file path or buffer object type"
with pytest.raises(ValueError, match=msg):
parser.read_csv(InvalidBuffer())
def test_invalid_file_buffer_mock(all_parsers):
# see gh-15337
parser = all_parsers
msg = "Invalid file path or buffer object type"
class Foo:
pass
with pytest.raises(ValueError, match=msg):
parser.read_csv(Foo())
def test_valid_file_buffer_seems_invalid(all_parsers):
# gh-16135: we want to ensure that "tell" and "seek"
# aren't actually being used when we call `read_csv`
#
# Thus, while the object may look "invalid" (these
# methods are attributes of the `StringIO` class),
# it is still a valid file-object for our purposes.
class NoSeekTellBuffer(StringIO):
def tell(self):
raise AttributeError("No tell method")
def seek(self, pos, whence=0):
raise AttributeError("No seek method")
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(NoSeekTellBuffer(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs",
[{}, {"error_bad_lines": True}], # Default is True. # Explicitly pass in.
)
@pytest.mark.parametrize(
"warn_kwargs", [{}, {"warn_bad_lines": True}, {"warn_bad_lines": False}]
)
def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
# see gh-15925
parser = all_parsers
kwargs.update(**warn_kwargs)
data = "a\n1\n1,2,3\n4\n5,6,7"
msg = "Expected 1 fields in line 3, saw 3"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_warn_bad_lines(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert "Skipping line 3" in captured.err
assert "Skipping line 5" in captured.err
def test_suppress_error_output(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(
StringIO(data), error_bad_lines=False, warn_bad_lines=False
)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert captured.err == ""
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with tm.ensure_clean(filename) as path:
df.to_csv(path, index=False)
result = parser.read_csv(path)
tm.assert_frame_equal(result, df)
def test_read_csv_memory_growth_chunksize(all_parsers):
# see gh-24805
#
# Let's just make sure that we don't crash
# as we iteratively process all chunks.
parser = all_parsers
with tm.ensure_clean() as path:
with open(path, "w") as f:
for i in range(1000):
f.write(str(i) + "\n")
with parser.read_csv(path, chunksize=20) as result:
for _ in result:
pass
def test_read_csv_raises_on_header_prefix(all_parsers):
# gh-27394
parser = all_parsers
msg = "Argument prefix must be None if argument header is not None"
s = StringIO("0,1\n2,3")
with pytest.raises(ValueError, match=msg):
parser.read_csv(s, header=0, prefix="_X")
def test_unexpected_keyword_parameter_exception(all_parsers):
# GH-34976
parser = all_parsers
msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg.format("read_csv")):
parser.read_csv("foo.csv", foo=1)
with pytest.raises(TypeError, match=msg.format("read_table")):
parser.read_table("foo.tsv", foo=1)
def test_read_table_same_signature_as_read_csv(all_parsers):
# GH-34976
parser = all_parsers
table_sign = signature(parser.read_table)
csv_sign = signature(parser.read_csv)
assert table_sign.parameters.keys() == csv_sign.parameters.keys()
assert table_sign.return_annotation == csv_sign.return_annotation
for key, csv_param in csv_sign.parameters.items():
table_param = table_sign.parameters[key]
if key == "sep":
assert csv_param.default == ","
assert table_param.default == "\t"
assert table_param.annotation == csv_param.annotation
assert table_param.kind == csv_param.kind
continue
else:
assert table_param == csv_param
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_first_row_bom(all_parsers):
# see gh-26545
parser = all_parsers
data = '''\ufeff"Head1" "Head2" "Head3"'''
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
def test_first_row_bom_unquoted(all_parsers):
# see gh-36343
parser = all_parsers
data = """\ufeffHead1 Head2 Head3"""
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
def test_integer_precision(all_parsers):
# Gh 7072
s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765
5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""
parser = all_parsers
result = parser.read_csv(StringIO(s), header=None)[4]
expected = Series([4321583677327450765, 4321113141090630389], name=4)
tm.assert_series_equal(result, expected)
def test_file_descriptor_leak(all_parsers):
# GH 31488
parser = all_parsers
with tm.ensure_clean() as path:
def test():
with pytest.raises(EmptyDataError, match="No columns to parse from file"):
parser.read_csv(path)
td.check_file_leaks(test)()
@pytest.mark.parametrize("nrows", range(1, 6))
def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
# GH 28071
ref = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
columns=list("ab"),
)
csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
parser = all_parsers
df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
tm.assert_frame_equal(df, ref[:nrows])
def test_no_header_two_extra_columns(all_parsers):
# GH 26218
column_names = ["one", "two", "three"]
ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
stream = StringIO("foo,bar,baz,bam,blah")
parser = all_parsers
df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
tm.assert_frame_equal(df, ref)
def test_read_csv_names_not_accepting_sets(all_parsers):
# GH 34946
data = """\
1,2,3
4,5,6\n"""
parser = all_parsers
with pytest.raises(ValueError, match="Names should be an ordered collection."):
parser.read_csv(StringIO(data), names=set("QAZ"))
def test_read_csv_with_use_inf_as_na(all_parsers):
# https://github.com/pandas-dev/pandas/issues/35493
parser = all_parsers
data = "1.0\nNaN\n3.0"
with option_context("use_inf_as_na", True):
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([1.0, np.nan, 3.0])
tm.assert_frame_equal(result, expected)
def test_read_table_delim_whitespace_default_sep(all_parsers):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
result = parser.read_table(f, delim_whitespace=True)
expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
def test_dict_keys_as_names(all_parsers):
# GH: 36928
data = "1,2"
keys = {"a": int, "b": int}.keys()
parser = all_parsers
result = parser.read_csv(StringIO(data), names=keys)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_read_csv_file_handle(all_parsers, io_class, encoding):
"""
Test whether read_csv does not close user-provided file handles.
GH 36980
"""
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
content = "a,b\n1,2"
if io_class == BytesIO:
content = content.encode("utf-8")
handle = io_class(content)
tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)
assert not handle.closed
def test_memory_map_file_handle_silent_fallback(all_parsers, compression):
"""
Do not fail for buffers with memory_map=True (cannot memory map BytesIO).
GH 37621
"""
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
handle = BytesIO()
expected.to_csv(handle, index=False, compression=compression, mode="wb")
handle.seek(0)
tm.assert_frame_equal(
parser.read_csv(handle, memory_map=True, compression=compression),
expected,
)
def test_memory_map_compression(all_parsers, compression):
"""
Support memory map for compressed files.
GH 37621
"""
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
with tm.ensure_clean() as path:
expected.to_csv(path, index=False, compression=compression)
tm.assert_frame_equal(
parser.read_csv(path, memory_map=True, compression=compression),
expected,
)
def test_context_manager(all_parsers, datapath):
# make sure that opened files are closed
parser = all_parsers
path = datapath("io", "data", "csv", "iris.csv")
reader = parser.read_csv(path, chunksize=1)
assert not reader._engine.handles.handle.closed
try:
with reader:
next(reader)
assert False
except AssertionError:
assert reader._engine.handles.handle.closed
def test_context_manager_user_provided(all_parsers, datapath):
# make sure that user-provided handles are not closed
parser = all_parsers
with open(datapath("io", "data", "csv", "iris.csv"), mode="r") as path:
reader = parser.read_csv(path, chunksize=1)
assert not reader._engine.handles.handle.closed
try:
with reader:
next(reader)
assert False
except AssertionError:
assert not reader._engine.handles.handle.closed
| bsd-3-clause |
araichev/gtfstk | gtfstk/validators.py | 1 | 46598 | """
Functions about validation.
"""
import re
import pytz
import datetime as dt
from typing import Optional, List, Union, TYPE_CHECKING
import pycountry
import numpy as np
import pandas as pd
from pandas import DataFrame
from . import constants as cs
from . import helpers as hp
if TYPE_CHECKING:
from .feed import Feed
TIME_PATTERN1 = re.compile(r"^\d\d:\d\d:\d\d$")
TIME_PATTERN2 = re.compile(r"^\d:\d\d:\d\d$")
DATE_FORMAT = "%Y%m%d"
TIMEZONES = set(pytz.all_timezones)
# ISO639-1 language codes, both lower and upper case
LANGS = set(
[lang.alpha_2 for lang in pycountry.languages if hasattr(lang, "alpha_2")]
)
LANGS |= set(x.upper() for x in LANGS)
CURRENCIES = set(
[c.alpha_3 for c in pycountry.currencies if hasattr(c, "alpha_3")]
)
URL_PATTERN = re.compile(
r"^(?:http)s?://" # http:// or https://
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
EMAIL_PATTERN = re.compile(r"[^@]+@[^@]+\.[^@]+")
COLOR_PATTERN = re.compile(r"(?:[0-9a-fA-F]{2}){3}$")
def valid_str(x: str) -> bool:
"""
Return ``True`` if ``x`` is a non-blank string;
otherwise return ``False``.
"""
if isinstance(x, str) and x.strip():
return True
else:
return False
def valid_time(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid H:MM:SS or HH:MM:SS time;
otherwise return ``False``.
"""
if isinstance(x, str) and (
re.match(TIME_PATTERN1, x) or re.match(TIME_PATTERN2, x)
):
return True
else:
return False
def valid_date(x: str) -> bool:
"""
    Return ``True`` if ``x`` is a valid YYYYMMDD date;
otherwise return ``False``.
"""
try:
if x != dt.datetime.strptime(x, DATE_FORMAT).strftime(DATE_FORMAT):
raise ValueError
return True
except ValueError:
return False
def valid_timezone(x: str) -> bool:
"""
    Return ``True`` if ``x`` is a valid human-readable timezone string,
e.g. 'Africa/Abidjan'; otherwise return ``False``.
"""
return x in TIMEZONES
def valid_lang(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid two-letter ISO 639 language
code, e.g. 'aa'; otherwise return ``False``.
"""
return x in LANGS
def valid_currency(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid three-letter ISO 4217 currency
code, e.g. 'AED'; otherwise return ``False``.
"""
return x in CURRENCIES
def valid_url(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid URL; otherwise return ``False``.
"""
if isinstance(x, str) and re.match(URL_PATTERN, x):
return True
else:
return False
def valid_email(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid email address; otherwise return
``False``.
"""
if isinstance(x, str) and re.match(EMAIL_PATTERN, x):
return True
else:
return False
def valid_color(x: str) -> bool:
"""
    Return ``True`` if ``x`` is a valid hexadecimal color string without
the leading hash; otherwise return ``False``.
"""
if isinstance(x, str) and re.match(COLOR_PATTERN, x):
return True
else:
return False
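
# Illustrative sketch (not part of the original module): exercising the field
# validators above on hand-picked sample values. The sample values themselves
# are made up; only the validator names come from this module.
def _example_field_validators() -> dict:
    samples = {
        valid_time: "8:30:00",              # H:MM:SS form is accepted
        valid_date: "20230230",             # not a real calendar date -> False
        valid_timezone: "Africa/Abidjan",
        valid_lang: "en",
        valid_currency: "NZD",
        valid_url: "https://example.com/gtfs.zip",
        valid_email: "info@example.com",
        valid_color: "FFFFFF",              # no leading '#'
    }
    # Map each validator's name to its outcome on the sample value.
    return {f.__name__: f(x) for f, x in samples.items()}
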
def check_for_required_columns(
problems: List, table: str, df: DataFrame
) -> List:
"""
Check that the given GTFS table has the required columns.
Parameters
----------
problems : list
        A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
        Check that the DataFrame contains the columns required by the GTFS
and append to the problems list one error for each column
missing.
"""
r = cs.GTFS_REF
req_columns = r.loc[
(r["table"] == table) & r["column_required"], "column"
].values
for col in req_columns:
if col not in df.columns:
problems.append(["error", f"Missing column {col}", table, []])
return problems
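
# Illustrative sketch (added for clarity; not part of the original API):
# a hypothetical, incomplete ``routes`` table run through the required-column
# check. Any required column missing from the frame (e.g. ``route_type``)
# contributes one 'error' entry to the returned list.
def _example_check_required_columns() -> list:
    routes = pd.DataFrame({"route_id": ["r1"], "route_short_name": ["1"]})
    return check_for_required_columns([], "routes", routes)
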
def check_for_invalid_columns(
problems: List, table: str, df: DataFrame
) -> List:
"""
Check for invalid columns in the given GTFS DataFrame.
Parameters
----------
problems : list
        A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
GTFS and append to the problems list one warning for each extra
column.
"""
r = cs.GTFS_REF
valid_columns = r.loc[r["table"] == table, "column"].values
for col in df.columns:
if col not in valid_columns:
problems.append(
["warning", f"Unrecognized column {col}", table, []]
)
return problems
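
# Illustrative sketch: the companion check flags columns the GTFS does not
# define. Unrecognized columns produce 'warning' entries rather than errors.
# The extra column name below is hypothetical.
def _example_check_invalid_columns() -> list:
    routes = pd.DataFrame({"route_id": ["r1"], "my_custom_note": ["hi"]})
    return check_for_invalid_columns([], "routes", routes)
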
def check_table(
problems: List,
table: str,
df: DataFrame,
condition,
message: str,
type_: str = "error",
) -> List:
"""
Check the given GTFS table for the given problem condition.
Parameters
----------
problems : list
        A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
condition : boolean expression
        One involving ``df``, e.g. ``~df['route_id'].map(valid_str)``
message : string
Problem message, e.g. ``'Invalid route_id'``
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
        Record the indices of ``df`` that satisfy the condition.
If the list of indices is nonempty, append to the
problems the item ``[type_, message, table, indices]``;
otherwise do not append anything.
"""
indices = df.loc[condition].index.tolist()
if indices:
problems.append([type_, message, table, indices])
return problems
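
# Illustrative sketch of driving ``check_table`` directly with a boolean
# condition that marks the problem rows; the higher-level checkers below build
# their conditions the same way. The tiny ``stops`` frame is made up.
def _example_check_table() -> list:
    stops = pd.DataFrame({"stop_id": ["s1", "s2"], "stop_lat": [-36.8, 95.0]})
    cond = ~stops["stop_lat"].between(-90, 90)  # latitude out of range
    return check_table([], "stops", stops, cond, "Invalid stop_lat")
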
def check_column(
problems: List,
table: str,
df: DataFrame,
column: str,
checker,
message: Optional[str] = None,
type_: str = "error",
*,
column_required: bool = True,
) -> List:
"""
Check the given column of the given GTFS with the given problem
checker.
Parameters
----------
problems : list
        A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
checker : boolean valued unary function
Returns ``True`` if and only if no problem is encountered
message : string (optional)
Problem message, e.g. 'Invalid route_id'.
Defaults to 'Invalid ``column``; maybe has extra space characters'
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
Apply the checker to the column entries and record the indices
of ``df`` where the checker returns ``False``.
        If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
before applying the checker.
"""
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(checker)
if not message:
message = f"Invalid {column}; maybe has extra space characters"
problems = check_table(problems, table, f, cond, message, type_)
return problems
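
# Illustrative sketch: ``check_column`` applies a boolean checker elementwise
# and records the rows where it fails. The agency frame is hypothetical; the
# blank name in the second row should be reported as an error.
def _example_check_column() -> list:
    agency = pd.DataFrame({"agency_name": ["Metro Transit", "  "]})
    return check_column([], "agency", agency, "agency_name", valid_str)
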
def check_column_id(
problems: List,
table: str,
df: DataFrame,
column: str,
*,
column_required: bool = True,
) -> List:
"""
A specialization of :func:`check_column`.
Parameters
----------
problems : list
        A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
Returns
-------
list
The ``problems`` list extended as follows.
        Record the indices of ``df`` where the given column has a
        duplicated entry or an invalid string.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
"""
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(valid_str)
problems = check_table(
problems,
table,
f,
cond,
f"Invalid {column}; maybe has extra space characters",
)
cond = f[column].duplicated()
problems = check_table(problems, table, f, cond, f"Repeated {column}")
return problems
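
# Illustrative sketch: ID columns must be non-blank and unique, so the
# repeated ``route_id`` and the blank entry in this made-up frame each
# produce an error.
def _example_check_column_id() -> list:
    routes = pd.DataFrame({"route_id": ["r1", "r1", ""]})
    return check_column_id([], "routes", routes, "route_id")
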
def check_column_linked_id(
problems: List,
table: str,
df: DataFrame,
column: str,
target_df: DataFrame,
target_column: Optional[str] = None,
*,
column_required: bool = True,
) -> List:
"""
A modified version of :func:`check_column_id`.
Parameters
----------
problems : list
        A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
target_df : DataFrame
A GTFS table
target_column : string
        A column of ``target_df``; defaults to ``column``
Returns
-------
list
The ``problems`` list extended as follows.
        Record the indices of ``df`` where the following condition is
        violated: ``column`` contains IDs that are valid strings and are
present in ``target_df`` under the ``target_column`` name.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
"""
if target_column is None:
target_column = column
f = df.copy()
if target_df is None:
g = pd.DataFrame()
g[target_column] = np.nan
else:
g = target_df.copy()
if target_column not in g.columns:
g[target_column] = np.nan
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
g = g.dropna(subset=[target_column])
cond = ~f[column].isin(g[target_column])
problems = check_table(problems, table, f, cond, f"Undefined {column}")
return problems
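
# Illustrative sketch: linked IDs must resolve against another table. Here a
# hypothetical ``trips`` frame references a ``route_id`` that the ``routes``
# frame does not define, so one 'Undefined route_id' error is expected.
def _example_check_column_linked_id() -> list:
    trips = pd.DataFrame({"trip_id": ["t1", "t2"], "route_id": ["r1", "r9"]})
    routes = pd.DataFrame({"route_id": ["r1"]})
    return check_column_linked_id([], "trips", trips, "route_id", routes)
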
def format_problems(
problems: List, *, as_df: bool = False
) -> Union[List, DataFrame]:
"""
Format the given problems list as a DataFrame.
Parameters
----------
problems : list
        A list of problems, each a four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
as_df : boolean
If ``True``, return the problems list as a DataFrame;
otherwise return it unchanged
Returns
-------
list or DataFrame
Return ``problems`` if not ``as_df``; otherwise return a
DataFrame with the problems as rows and the columns
``['type', 'message', 'table', 'rows']``.
"""
if as_df:
problems = pd.DataFrame(
problems, columns=["type", "message", "table", "rows"]
).sort_values(["type", "table"])
return problems
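# Illustrative sketch (not part of the original module): rendering a raw
# problems list as a report DataFrame; the message and row numbers below
# are made up for illustration.
def _example_format_problems():
    problems = [["warning", "Repeated pair (route_short_name, route_long_name)", "routes", [3, 7]]]
    return format_problems(problems, as_df=True)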
def check_agency(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Check that ``feed.agency`` follows the GTFS.
Return a list of problems of the form described in
:func:`check_table`;
the list will be empty if no problems are found.
"""
table = "agency"
problems = []
# Preliminary checks
if feed.agency is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.agency.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check agency_id
problems = check_column_id(
problems, table, f, "agency_id", column_required=False
)
# Check agency_name
problems = check_column(problems, table, f, "agency_name", valid_str)
# Check agency_url
problems = check_column(problems, table, f, "agency_url", valid_url)
# Check agency_timezone
problems = check_column(
problems, table, f, "agency_timezone", valid_timezone
)
# Check agency_fare_url
problems = check_column(
problems, table, f, "agency_fare_url", valid_url, column_required=False
)
# Check agency_lang
problems = check_column(
problems, table, f, "agency_lang", valid_lang, column_required=False
)
# Check agency_phone
problems = check_column(
problems, table, f, "agency_phone", valid_str, column_required=False
)
# Check agency_email
problems = check_column(
problems, table, f, "agency_email", valid_email, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_calendar(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar``.
"""
table = "calendar"
problems = []
# Preliminary checks
if feed.calendar is None:
return problems
f = feed.calendar.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column_id(problems, table, f, "service_id")
# Check weekday columns
v = lambda x: x in range(2)
for col in [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]:
problems = check_column(problems, table, f, col, v)
# Check start_date and end_date
for col in ["start_date", "end_date"]:
problems = check_column(problems, table, f, col, valid_date)
if include_warnings:
# Check if feed has expired
d = f["end_date"].max()
if feed.calendar_dates is not None and not feed.calendar_dates.empty:
table += "/calendar_dates"
d = max(d, feed.calendar_dates["date"].max())
if d < dt.datetime.today().strftime(DATE_FORMAT):
problems.append(["warning", "Feed expired", table, []])
return format_problems(problems, as_df=as_df)
def check_calendar_dates(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar_dates``.
"""
table = "calendar_dates"
problems = []
# Preliminary checks
if feed.calendar_dates is None:
return problems
f = feed.calendar_dates.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column(problems, table, f, "service_id", valid_str)
# Check date
problems = check_column(problems, table, f, "date", valid_date)
# No duplicate (service_id, date) pairs allowed
cond = f[["service_id", "date"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (service_id, date)"
)
# Check exception_type
v = lambda x: x in [1, 2]
problems = check_column(problems, table, f, "exception_type", v)
return format_problems(problems, as_df=as_df)
def check_fare_attributes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.fare_attributes``.
"""
table = "fare_attributes"
problems = []
# Preliminary checks
if feed.fare_attributes is None:
return problems
f = feed.fare_attributes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check fare_id
problems = check_column_id(problems, table, f, "fare_id")
# Check currency_type
problems = check_column(
problems, table, f, "currency_type", valid_currency
)
# Check payment_method
v = lambda x: x in range(2)
problems = check_column(problems, table, f, "payment_method", v)
# Check transfers
v = lambda x: pd.isna(x) or x in range(3)
problems = check_column(problems, table, f, "transfers", v)
# Check transfer_duration
v = lambda x: x >= 0
problems = check_column(
problems, table, f, "transfer_duration", v, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_fare_rules(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.fare_rules``.
"""
table = "fare_rules"
problems = []
# Preliminary checks
if feed.fare_rules is None:
return problems
f = feed.fare_rules.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check fare_id
problems = check_column_linked_id(
problems, table, f, "fare_id", feed.fare_attributes
)
# Check route_id
problems = check_column_linked_id(
problems, table, f, "route_id", feed.routes, column_required=False
)
# Check origin_id, destination_id, contains_id
for col in ["origin_id", "destination_id", "contains_id"]:
problems = check_column_linked_id(
problems,
table,
f,
col,
feed.stops,
"zone_id",
column_required=False,
)
return format_problems(problems, as_df=as_df)
def check_feed_info(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.feed_info``.
"""
table = "feed_info"
problems = []
# Preliminary checks
if feed.feed_info is None:
return problems
f = feed.feed_info.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check feed_publisher_name
problems = check_column(
problems, table, f, "feed_publisher_name", valid_str
)
# Check feed_publisher_url
problems = check_column(
problems, table, f, "feed_publisher_url", valid_url
)
# Check feed_lang
problems = check_column(problems, table, f, "feed_lang", valid_lang)
# Check feed_start_date and feed_end_date
cols = ["feed_start_date", "feed_end_date"]
for col in cols:
problems = check_column(
problems, table, f, col, valid_date, column_required=False
)
if set(cols) <= set(f.columns):
d1, d2 = f.loc[0, ["feed_start_date", "feed_end_date"]].values
if pd.notna(d1) and pd.notna(d2) and d1 > d2:
problems.append(
[
"error",
"feed_start_date later than feed_end_date",
table,
[0],
]
)
# Check feed_version
problems = check_column(
problems, table, f, "feed_version", valid_str, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_frequencies(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.frequencies``.
"""
table = "frequencies"
problems = []
# Preliminary checks
if feed.frequencies is None:
return problems
f = feed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check start_time and end_time
time_cols = ["start_time", "end_time"]
for col in time_cols:
problems = check_column(problems, table, f, col, valid_time)
for col in time_cols:
f[col] = f[col].map(hp.timestr_to_seconds)
# Start_time should be earlier than end_time
cond = f["start_time"] >= f["end_time"]
problems = check_table(
problems, table, f, cond, "start_time not earlier than end_time"
)
# Headway periods should not overlap
f = f.sort_values(["trip_id", "start_time"])
for __, group in f.groupby("trip_id"):
a = group["start_time"].values
b = group["end_time"].values
indices = np.flatnonzero(a[1:] < b[:-1]).tolist()
if indices:
problems.append(
[
"error",
"Headway periods for the same trip overlap",
table,
indices,
]
)
# Check headway_secs
v = lambda x: x >= 0
problems = check_column(problems, table, f, "headway_secs", v)
# Check exact_times
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "exact_times", v, column_required=False
)
return format_problems(problems, as_df=as_df)
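# Worked mini example of the headway-overlap test in check_frequencies above
# (illustrative only): for sorted start times a = [0, 3600, 7000] and end
# times b = [3600, 7200, 9000], np.flatnonzero(a[1:] < b[:-1]) returns [1],
# because the third period starts (7000) before the second one ends (7200).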
def check_routes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.routes``.
"""
table = "routes"
problems = []
# Preliminary checks
if feed.routes is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.routes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_id
problems = check_column_id(problems, table, f, "route_id")
# Check agency_id
if "agency_id" in f:
if feed.agency is None:
problems.append(
[
"error",
"agency_id column present in routes agency table missing",
table,
[],
]
)
elif "agency_id" not in feed.agency.columns:
problems.append(
[
"error",
"agency_id column present in routes but not in agency",
table,
[],
]
)
else:
g = f.dropna(subset=["agency_id"])
cond = ~g["agency_id"].isin(feed.agency["agency_id"])
problems = check_table(
problems, table, g, cond, "Undefined agency_id"
)
# Check route_short_name and route_long_name
for column in ["route_short_name", "route_long_name"]:
problems = check_column(
problems, table, f, column, valid_str, column_required=False
)
cond = ~(f["route_short_name"].notna() | f["route_long_name"].notna())
problems = check_table(
problems,
table,
f,
cond,
"route_short_name and route_long_name both empty",
)
# Check route_type
v = lambda x: x in range(8)
problems = check_column(problems, table, f, "route_type", v)
# Check route_url
problems = check_column(
problems, table, f, "route_url", valid_url, column_required=False
)
# Check route_color and route_text_color
for col in ["route_color", "route_text_color"]:
problems = check_column(
problems, table, f, col, valid_color, column_required=False
)
if include_warnings:
# Check for duplicated (route_short_name, route_long_name) pairs
cond = f[["route_short_name", "route_long_name"]].duplicated()
problems = check_table(
problems,
table,
f,
cond,
"Repeated pair (route_short_name, route_long_name)",
"warning",
)
# Check for routes without trips
s = feed.trips["route_id"]
cond = ~f["route_id"].isin(s)
problems = check_table(
problems, table, f, cond, "Route has no trips", "warning"
)
return format_problems(problems, as_df=as_df)
def check_shapes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.shapes``.
"""
table = "shapes"
problems = []
# Preliminary checks
if feed.shapes is None:
return problems
f = feed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
f.sort_values(["shape_id", "shape_pt_sequence"], inplace=True)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = check_column(problems, table, f, "shape_id", valid_str)
# Check shape_pt_lon and shape_pt_lat
for column, bound in [("shape_pt_lon", 180), ("shape_pt_lat", 90)]:
v = lambda x: pd.notna(x) and -bound <= x <= bound
cond = ~f[column].map(v)
problems = check_table(
problems,
table,
f,
cond,
f"{column} out of bounds {[-bound, bound]}",
)
# Check for duplicated (shape_id, shape_pt_sequence) pairs
cond = f[["shape_id", "shape_pt_sequence"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (shape_id, shape_pt_sequence)"
)
# Check if shape_dist_traveled decreases along a trip
if "shape_dist_traveled" in f.columns:
g = f.dropna(subset=["shape_dist_traveled"])
indices = []
prev_sid = None
prev_index = None
prev_dist = -1
cols = ["shape_id", "shape_dist_traveled"]
for i, sid, dist in g[cols].itertuples():
if sid == prev_sid and dist < prev_dist:
indices.append(prev_index)
prev_sid = sid
prev_index = i
prev_dist = dist
if indices:
problems.append(
[
"error",
"shape_dist_traveled decreases on a trip",
table,
indices,
]
)
return format_problems(problems, as_df=as_df)
def check_stops(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stops``.
"""
table = "stops"
problems = []
# Preliminary checks
if feed.stops is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stops.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check stop_id
problems = check_column_id(problems, table, f, "stop_id")
# Check stop_code, stop_desc, zone_id, parent_station
for column in ["stop_code", "stop_desc", "zone_id", "parent_station"]:
problems = check_column(
problems, table, f, column, valid_str, column_required=False
)
# Check stop_name
problems = check_column(problems, table, f, "stop_name", valid_str)
# Check stop_lon and stop_lat
if "location_type" in f.columns:
requires_location = f.location_type.isin([0, 1, 2])
else:
requires_location = True
for column, bound in [("stop_lon", 180), ("stop_lat", 90)]:
v = lambda x: pd.notna(x) and -bound <= x <= bound
cond = requires_location & ~f[column].map(v)
problems = check_table(
problems,
table,
f,
cond,
f"{column} out of bounds {[-bound, bound]}",
)
# Check stop_url
problems = check_column(
problems, table, f, "stop_url", valid_url, column_required=False
)
# Check location_type
v = lambda x: x in range(5)
problems = check_column(
problems, table, f, "location_type", v, column_required=False
)
# Check stop_timezone
problems = check_column(
problems,
table,
f,
"stop_timezone",
valid_timezone,
column_required=False,
)
# Check wheelchair_boarding
v = lambda x: x in range(3)
problems = check_column(
problems, table, f, "wheelchair_boarding", v, column_required=False
)
# Check further location_type and parent_station
if "parent_station" in f.columns:
if "location_type" not in f.columns:
problems.append(
[
"error",
"parent_station column present but location_type column missing",
table,
[],
]
)
else:
# Parent stations must be well-defined
S = set(f.stop_id) | {np.nan}
v = lambda x: x in S
problems = check_column(
problems,
table,
f,
"parent_station",
v,
"A parent station must be well-defined",
column_required=False,
)
# Stations must have location type 1
station_ids = f.loc[f.parent_station.notna(), "parent_station"]
cond = f.stop_id.isin(station_ids) & (f.location_type != 1)
problems = check_table(
problems, table, f, cond, "A station must have location_type 1"
)
# Stations must not lie in stations
cond = (f.location_type == 1) & f.parent_station.notna()
problems = check_table(
problems,
table,
f,
cond,
"A station must not lie in another station",
)
# Entrances (type 2), generic nodes (type 3) and boarding areas (type 4)
# need to be part of a parent
cond = f.location_type.isin([2, 3, 4]) & f.parent_station.isna()
problems = check_table(
problems,
table,
f,
cond,
"Entrances, nodes, and boarding areas must be part of a parent station",
)
if include_warnings:
# Check for stops of location type 0 or NaN without stop times
ids = []
if feed.stop_times is not None:
ids = feed.stop_times.stop_id.unique()
cond = ~feed.stops.stop_id.isin(ids)
if "location_type" in feed.stops.columns:
cond &= f.location_type.isin([0, np.nan])
problems = check_table(
problems, table, f, cond, "Stop has no stop times", "warning"
)
return format_problems(problems, as_df=as_df)
def check_stop_times(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stop_times``.
"""
table = "stop_times"
problems = []
# Preliminary checks
if feed.stop_times is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stop_times.copy().sort_values(["trip_id", "stop_sequence"])
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check arrival_time and departure_time
v = lambda x: pd.isna(x) or valid_time(x)
for col in ["arrival_time", "departure_time"]:
problems = check_column(problems, table, f, col, v)
# Check that arrival and departure times exist for the first and last
# stop of each trip and for each timepoint.
# For feeds with many trips, iterating through the stop time rows is
# faster than using groupby.
if "timepoint" not in f.columns:
f["timepoint"] = np.nan # This will not mess up later timepoint check
indices = []
prev_tid = None
prev_index = None
prev_atime = 1
prev_dtime = 1
for i, tid, atime, dtime, tp in f[
["trip_id", "arrival_time", "departure_time", "timepoint"]
].itertuples():
if tid != prev_tid:
# Check last stop of previous trip
if pd.isna(prev_atime) or pd.isna(prev_dtime):
indices.append(prev_index)
# Check first stop of current trip
if pd.isna(atime) or pd.isna(dtime):
indices.append(i)
elif tp == 1 and (pd.isna(atime) or pd.isna(dtime)):
# Failure at timepoint
indices.append(i)
prev_tid = tid
prev_index = i
prev_atime = atime
prev_dtime = dtime
if pd.isna(prev_atime) or pd.isna(prev_dtime):
indices.append(prev_index)
if indices:
problems.append(
[
"error",
"First/last/time point arrival/departure time missing",
table,
indices,
]
)
# Check stop_id
problems = check_column_linked_id(
problems, table, f, "stop_id", feed.stops
)
# Check for duplicated (trip_id, stop_sequence) pairs
cond = f[["trip_id", "stop_sequence"]].dropna().duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (trip_id, stop_sequence)"
)
# Check stop_headsign
problems = check_column(
problems, table, f, "stop_headsign", valid_str, column_required=False
)
# Check pickup_type and drop_off_type
for col in ["pickup_type", "drop_off_type"]:
v = lambda x: x in range(4)
problems = check_column(
problems, table, f, col, v, column_required=False
)
# Check if shape_dist_traveled decreases on a trip
if "shape_dist_traveled" in f.columns:
g = f.dropna(subset=["shape_dist_traveled"])
indices = []
prev_tid = None
prev_dist = -1
for i, tid, dist in g[["trip_id", "shape_dist_traveled"]].itertuples():
if tid == prev_tid and dist < prev_dist:
indices.append(i)
prev_tid = tid
prev_dist = dist
if indices:
problems.append(
[
"error",
"shape_dist_traveled decreases on a trip",
table,
indices,
]
)
# Check timepoint
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "timepoint", v, column_required=False
)
if include_warnings:
# Check for duplicated (trip_id, departure_time) pairs
cond = f[["trip_id", "departure_time"]].duplicated()
problems = check_table(
problems,
table,
f,
cond,
"Repeated pair (trip_id, departure_time)",
"warning",
)
return format_problems(problems, as_df=as_df)
def check_transfers(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.transfers``.
"""
table = "transfers"
problems = []
# Preliminary checks
if feed.transfers is None:
return problems
f = feed.transfers.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check from_stop_id and to_stop_id
for col in ["from_stop_id", "to_stop_id"]:
problems = check_column_linked_id(
problems, table, f, col, feed.stops, "stop_id"
)
# Check transfer_type
v = lambda x: pd.isna(x) or x in range(5)
problems = check_column(
problems, table, f, "transfer_type", v, column_required=False
)
# Check min_transfer_time
v = lambda x: x >= 0
problems = check_column(
problems, table, f, "min_transfer_time", v, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_trips(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.trips``.
"""
table = "trips"
problems = []
# Preliminary checks
if feed.trips is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.trips.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_id(problems, table, f, "trip_id")
# Check route_id
problems = check_column_linked_id(
problems, table, f, "route_id", feed.routes
)
# Check service_id
g = pd.DataFrame()
if feed.calendar is not None:
g = pd.concat([g, feed.calendar], sort=False)
if feed.calendar_dates is not None:
g = pd.concat([g, feed.calendar_dates], sort=False)
problems = check_column_linked_id(problems, table, f, "service_id", g)
# Check direction_id
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "direction_id", v, column_required=False
)
# Check block_id
if "block_id" in f.columns:
v = lambda x: pd.isna(x) or valid_str(x)
cond = ~f["block_id"].map(v)
problems = check_table(problems, table, f, cond, "Blank block_id")
# Check shape_id
problems = check_column_linked_id(
problems, table, f, "shape_id", feed.shapes, column_required=False
)
# Check wheelchair_accessible and bikes_allowed
v = lambda x: x in range(3)
for column in ["wheelchair_accessible", "bikes_allowed"]:
problems = check_column(
problems, table, f, column, v, column_required=False
)
# Check for trips with no stop times
if include_warnings:
s = feed.stop_times["trip_id"] if feed.stop_times is not None else []
cond = ~f["trip_id"].isin(s)
problems = check_table(
problems, table, f, cond, "Trip has no stop times", "warning"
)
return format_problems(problems, as_df=as_df)
def validate(
feed: "Feed", *, as_df: bool = True, include_warnings: bool = True
) -> Union[List, DataFrame]:
"""
Check whether the given feed satisfies the GTFS.
Parameters
----------
feed : Feed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the GTFS is
violated; 'warning' means there is a problem but it's not a
GTFS violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the feed is missing required tables or required
columns.
Notes
-----
- This function interprets the GTFS liberally, classifying problems
as warnings rather than errors where the GTFS is unclear.
For example if a trip_id listed in the trips table is not listed
in the stop times table (a trip with no stop times),
then that's a warning and not an error.
- Timing benchmark: on a 2.80 GHz processor machine with 16 GB of
memory, this function checks `this 31 MB Southeast Queensland feed
<http://transitfeeds.com/p/translink/21/20170310>`_
in 22 seconds, including warnings.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
"check_agency",
"check_calendar",
"check_calendar_dates",
"check_fare_attributes",
"check_fare_rules",
"check_feed_info",
"check_frequencies",
"check_routes",
"check_shapes",
"check_stops",
"check_stop_times",
"check_transfers",
"check_trips",
]
for checker in checkers:
problems.extend(
globals()[checker](feed, include_warnings=include_warnings)
)
# Check calendar/calendar_dates combo
if feed.calendar is None and feed.calendar_dates is None:
problems.append(
["error", "Missing both tables", "calendar & calendar_dates", []]
)
return format_problems(problems, as_df=as_df)
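# Illustrative usage sketch (not part of the original module). Assuming the
# parent package exposes a ``read_feed`` helper and the path below exists
# (both hypothetical here), a report could be produced with:
#
#     import gtfs_kit as gk
#     feed = gk.read_feed("path/to/feed.zip", dist_units="km")
#     report = validate(feed, as_df=True, include_warnings=True)
#     print(report[report["type"] == "error"])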
| mit |
themrmax/scikit-learn | examples/cluster/plot_dbscan.py | 3 | 2508 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
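# A possible way to pick ``eps`` (illustrative sketch, not part of the original
# example): plot the sorted distances to each point's 10th nearest neighbour
# and look for the "elbow".
#
#     from sklearn.neighbors import NearestNeighbors
#     nn = NearestNeighbors(n_neighbors=10).fit(X)
#     distances, _ = nn.kneighbors(X)
#     plt.plot(np.sort(distances[:, -1]))
#     plt.show()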
| bsd-3-clause |
jmausolf/HIV_Status | pipeline/hiv_pipeline.py | 1 | 16198 | ##########################################
## ##
## Joshua G. Mausolf ##
## Department of Sociology ##
## University of Chicago ##
## ##
##########################################
import pandas as pd
import numpy as np
import pipeline as ml
def summarize_data(dataset):
###############
## LOAD DATA ##
###############
print "Loading data..."
df = ml.read_data(dataset)
variables = list(df.columns.values)
#print variables
####################################
## RUN INITIAL SUMMARY STATISTICS ##
####################################
print "Running summary statistics..."
ml.summarize_dataset(dataset)
for v in variables:
ml.summary_statistics(v, dataset, 5, 10)
return df
def clean_data(df, cohort):
print "Cleaning data..."
################################
## DROP UNNECESSARY VARIABLES ##
################################
print "Dropping unnecessary variables..."
if cohort == 'cohort1':
print "for cohort 1..."
variables_to_drop = ['g6_tardyr','g6_school_name', 'g7_school_name', 'g8_school_name', 'g9_school_name', 'g10_school_name', 'g11_school_name', 'g12_school_name','g6_year', 'g6_gradeexp', 'g6_grade', 'g6_wcode', 'g7_year', 'g7_gradeexp', 'g7_grade', 'g7_wcode', 'g8_year', 'g8_gradeexp', 'g8_grade', 'g8_wcode', 'g9_year', 'g9_gradeexp', 'g9_grade', 'g9_wcode', 'g10_year', 'g10_gradeexp', 'g10_grade', 'g10_wcode', 'g11_year', 'g11_gradeexp', 'g11_grade', 'g11_wcode', 'g12_year', 'g12_gradeexp', 'g12_grade', 'g12_wcode']
for v in variables_to_drop:
df.drop(v, axis=1, inplace=True)
elif cohort == 'cohort2':
print "for cohort 2..."
variables_to_drop = ['g6_tardyr','g6_school_name', 'g7_school_name', 'g8_school_name', 'g9_school_name', 'g10_school_name', 'g11_school_name', 'g12_school_name','g6_year', 'g6_grade', 'g6_wcode', 'g7_year', 'g7_grade', 'g7_wcode', 'g8_year', 'g8_grade', 'g8_wcode', 'g9_year', 'g9_grade', 'g9_wcode', 'g10_year', 'g10_grade', 'g10_wcode', 'g11_year', 'g11_grade', 'g11_wcode', 'g12_year', 'g12_grade', 'g12_wcode']
for v in variables_to_drop:
df.drop(v, axis=1, inplace=True)
else:
pass
#######################
## COMBINE VARIABLES ##
#######################
## Create single column for birth year
print "Correcting birthdays..."
df['birthday'] = df['g11_byrmm']
birthday_cols = ['g12_byrmm', 'g11_byrmm', 'g10_byrmm', 'g9_byrmm', 'g8_byrmm', 'g7_byrmm', 'g6_byrmm']
for c in birthday_cols:
ml.replace_with_other_col(df, 'birthday', c)
df.drop(c, axis=1, inplace=True)
#print ml.summarize(df['birthday'])
## Create single column for gender
print "Correcting gender..."
df['gender'] = df['g11_gender']
gender_cols = ['g12_gender', 'g11_gender', 'g10_gender', 'g9_gender', 'g8_gender', 'g7_gender', 'g6_gender']
for c in gender_cols:
ml.replace_with_other_col(df, 'gender', c)
df.drop(c, axis=1, inplace=True)
#print df['gender'].value_counts()
################
## CLEAN DATA ##
################
print "Cleaning data..."
retained_cols = ['g11_retained', 'g12_retained', 'g9_newmcps', 'g10_newmcps', 'g11_newmcps', 'g12_newmcps', 'g9_newus', 'g10_newus', 'g11_newus', 'g12_newus']
for col in retained_cols:
for index, row in df.iterrows():
if pd.isnull(row[col]):
df.ix[index, col] = 0
else:
df.ix[index, col] = 1
df[col] = df[col].astype(int)
###############################
## CREATE MISSING DATA FLAGS ##
###############################
print "Creating missing data flags..."
## Create flag if a given student is missing a year's worth of data
grade_id = ['g6_pid', 'g7_pid', 'g8_pid', 'g9_pid', 'g10_pid', 'g11_pid', 'g12_pid']
year = 6
for g in grade_id:
col_name = 'g' + str(year) + '_missing'
for index, row in df.iterrows():
if pd.isnull(row[g]):
df.ix[index, col_name] = 1
else:
df.ix[index, col_name] = 0
df.drop(g, axis=1, inplace=True)
year+=1
ml.print_to_csv(df, 'data/predummy_data.csv')
#ml.print_to_csv(df, '/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/predummy_data.csv')
def deal_with_dummies(dataset):
df = ml.read_data(dataset)
###################################
## CREATE DUMMY VARIABLE COLUMNS ##
###################################
print "Creating dummy variables..."
string_cols = list(df.select_dtypes(include=['object']))
print string_cols
df = ml.get_dummys(df, string_cols, dummy_na=True)
for col in string_cols:
print col
df.drop(col, axis=1, inplace=True)
## Save clean version
ml.print_to_csv(df, 'data/clean_data.csv')
#ml.print_to_csv(df, '/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/clean_data.csv')
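# For reference (illustrative sketch, not part of the original pipeline): the
# helper above is assumed to behave like pandas' own get_dummies, e.g. with a
# made-up frame
#
#     tmp = pd.DataFrame({'g6_school_name': ['A', 'B', None]})
#     pd.get_dummies(tmp, columns=['g6_school_name'], dummy_na=True)
#
# which yields one indicator column per category plus a *_nan column.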
def impute_data(dataset, cohort):
df = ml.read_data(dataset)
##########################
## IMPUTE ACADEMIC DATA ##
##########################
print "Impute missing academic information..."
## Fill missing school data -- use mean imputation for now
school_vars = ['g6_school_id', 'g7_school_id', 'g8_school_id', 'g9_school_id', 'g10_school_id', 'g11_school_id', 'g12_school_id']
ml.replace_with_mean(df, school_vars)
## Fill missing grade and test score information -- use mean imputation for now
grades_tests = ['g6_q1mpa', 'g6_q2mpa', 'g6_q3mpa', 'g6_q4mpa', 'g6_g6mapr','g7_q1mpa', 'g7_q2mpa', 'g7_q3mpa', 'g7_q4mpa', 'g7_g7mapr', 'g8_q1mpa', 'g8_q2mpa', 'g8_q3mpa', 'g8_q4mpa', 'g8_g8mapr', 'g9_q1mpa', 'g9_q2mpa', 'g9_q3mpa', 'g9_q4mpa', 'g9_g8mapr', 'g10_q1mpa', 'g10_q2mpa', 'g10_q3mpa', 'g10_q4mpa', 'g10_psatv', 'g10_psatm', 'g11_q1mpa', 'g11_q2mpa', 'g11_q3mpa', 'g11_q4mpa', 'g11_psatv', 'g11_psatm', 'g12_q1mpa', 'g12_q2mpa', 'g12_q3mpa', 'g12_q4mpa', 'g12_psatv', 'g12_psatm']
ml.replace_with_mean(df, grades_tests)
## Fill in missing id with dummy
ml.replace_with_value(df, 'id', 0)
## Fill missing MSAM data
g6_msam = ['g6_g6msam_Advanced','g6_g6msam_Basic','g6_g6msam_Proficient']
ml.replace_dummy_null_mean(df, 'g6_g6msam_nan', g6_msam)
if cohort == 'cohort1':
g7_msam = ['g7_g7msam_Advanced','g7_g7msam_Basic','g7_g7msam_Proficient']
ml.replace_dummy_null_mean(df, 'g7_g7msam_nan', g7_msam)
elif cohort == 'cohort2':
g7_msam = ['g7_g7msam_ ','g7_g7msam_1','g7_g7msam_2', 'g7_g7msam_3']
ml.replace_dummy_null_mean(df, 'g7_g7msam_nan', g7_msam)
g8_msam = ['g8_g8msam_Advanced','g8_g8msam_Basic','g8_g8msam_Proficient']
ml.replace_dummy_null_mean(df, 'g8_g8msam_nan', g8_msam)
g9_msam = ['g9_g8msam_Advanced','g9_g8msam_Basic','g9_g8msam_Proficient']
ml.replace_dummy_null_mean(df,'g9_g8msam_nan', g9_msam)
############################
## IMPUTE BEHAVIORAL DATA ##
############################
print "Impute missing behavioral data..."
## Fill missing behavioral data -- use mean imputation for now
behavioral_cols = ['g6_absrate', 'g6_nsusp','g7_absrate', 'g7_tardyr', 'g7_nsusp', 'g8_absrate', 'g8_tardyr', 'g8_nsusp', 'g9_absrate', 'g9_nsusp', 'g10_absrate', 'g10_nsusp', 'g11_absrate', 'g11_nsusp','g12_absrate', 'g12_nsusp']
ml.replace_with_mean(df, behavioral_cols)
## Fill in missing birthday data
#ml.replace_with_mean(df, 'birthday')
############################
## IMPUTE ENROLLMENT DATA ##
############################
print "Imputing missing enrollment data..."
## Fill missing enrollment data
print "Fixing mobility columns..."
mobility_cols = ['g10_retained', 'g6_mobility', 'g7_mobility', 'g8_mobility', 'g9_mobility', 'g9_retained','g10_mobility', 'g11_mobility', 'g12_mobility', 'birthday']
# Includes g10_retained because it's coded as 0/1 already
ml.replace_with_mean(df, mobility_cols)
#########################
## IMPUTE DROPOUT DATA ##
#########################
print "Impute missing droput information..."
## Fill missing dropout information with 0
dropout_vars = ['g6_dropout', 'g7_dropout', 'g8_dropout', 'g9_dropout', 'g10_dropout', 'g11_dropout', 'g12_dropout', 'dropout']
ml.replace_with_value(df, dropout_vars, [0,0,0,0,0,0,0,0])
#variables = list(df.columns.values)
#print variables
############################
# IMPUTE NEIGHBORHOOD DATA #
############################
print "Imputing missing school neighborhood data..."
## Fill missing school neighborhood data
print "Fixing neighborhood columns..."
"""
neighborhood_cols = ['suspensionrate', 'mobilityrateentrantswithdra', 'attendancerate', 'avg_class_size', 'studentinstructionalstaffratio', 'dropoutrate', 'grade12documenteddecisionco', 'grade12documenteddecisionem', 'grade12documenteddecisionmi', 'grad12docdec_col_emp', 'graduationrate', 'studentsmeetinguniversitysyste', 'Est_Households_2012', 'Est_Population_2012', 'Med_Household_Income_2012', 'Mean_Household_Income_2012', 'Pop_Below_Poverty_2012', 'Percent_Below_Poverty_2012', 'Pop_Under18_2012', 'Under18_Below_Poverty_2012', 'Under18_Below_Poverty_Percent_2012', 'Housholds_on_Food_stamps_with_Children_Under18_2012', 'Housholds_Pop_on_Food_Stamps_2012', 'Pop_BlackAA_2012', 'Pop_White_2012', 'Bt_18_24_percent_less_than_High_School_2012', 'Bt_18_24_percent_High_School_2012', 'Bt_18_24_percent_Some_College_or_AA_2012', 'Bt_1824_percent_BA_or_Higher_2012', 'Over_25_percent_less_than_9th_grade_2012', 'Over_25_percent_9th_12th_2012', 'Over_25_percent_High_School_2012', 'Over_25__percent_Some_College_No_Deg_2012', 'Over_25_percent_AA_2012', 'Over_25_percent_Bachelors_2012', 'Over_25_percent_Graduate_or_Professionals_2012']
"""
neighborhood_cols = ['g9_suspensionrate', 'g10_suspensionrate', 'g11_suspensionrate', 'g12_suspensionrate', 'g9_mobilityrateentrantswithdra', 'g10_mobilityrateentrantswithdra', 'g11_mobilityrateentrantswithdra', 'g12_mobilityrateentrantswithdra', 'g9_attendancerate', 'g10_attendancerate', 'g11_attendancerate', 'g12_attendancerate','g9_avg_class_size', 'g10_avg_class_size', 'g11_avg_class_size', 'g12_avg_class_size','g9_studentinstructionalstaffratio', 'g10_studentinstructionalstaffratio', 'g11_studentinstructionalstaffratio', 'g12_studentinstructionalstaffratio','g9_dropoutrate', 'g10_dropoutrate', 'g11_dropoutrate', 'g12_dropoutrate', 'g9_grade12documenteddecisionco', 'g10_grade12documenteddecisionco', 'g11_grade12documenteddecisionco', 'g12_grade12documenteddecisionco','g9_grade12documenteddecisionem', 'g10_grade12documenteddecisionem', 'g11_grade12documenteddecisionem', 'g12_grade12documenteddecisionem','g9_grade12documenteddecisionmi', 'g10_grade12documenteddecisionmi', 'g11_grade12documenteddecisionmi', 'g12_grade12documenteddecisionmi', 'g9_grad12docdec_col_emp', 'g10_grad12docdec_col_emp', 'g11_grad12docdec_col_emp', 'g12_grad12docdec_col_emp', 'g9_graduationrate', 'g10_graduationrate', 'g11_graduationrate', 'g12_graduationrate','g9_studentsmeetinguniversitysyste', 'g10_studentsmeetinguniversitysyste', 'g11_studentsmeetinguniversitysyste', 'g12_studentsmeetinguniversitysyste', 'g9_Est_Households_2012', 'g10_Est_Households_2012', 'g11_Est_Households_2012', 'g12_Est_Households_2012','g9_Est_Population_2012', 'g10_Est_Population_2012', 'g11_Est_Population_2012', 'g12_Est_Population_2012', 'g9_Med_Household_Income_2012', 'g10_Med_Household_Income_2012', 'g11_Med_Household_Income_2012', 'g12_Med_Household_Income_2012', 'g9_Mean_Household_Income_2012', 'g10_Mean_Household_Income_2012', 'g11_Mean_Household_Income_2012', 'g12_Mean_Household_Income_2012', 'g9_Pop_Below_Poverty_2012', 'g10_Pop_Below_Poverty_2012', 'g11_Pop_Below_Poverty_2012', 'g12_Pop_Below_Poverty_2012', 'g9_Percent_Below_Poverty_2012', 'g10_Percent_Below_Poverty_2012', 'g11_Percent_Below_Poverty_2012', 'g12_Percent_Below_Poverty_2012', 'g9_Pop_Under18_2012', 'g10_Pop_Under18_2012', 'g11_Pop_Under18_2012', 'g12_Pop_Under18_2012', 'g9_Under18_Below_Poverty_2012', 'g10_Under18_Below_Poverty_2012', 'g11_Under18_Below_Poverty_2012', 'g12_Under18_Below_Poverty_2012', 'g9_Under18_Below_Poverty_Percent_2012', 'g10_Under18_Below_Poverty_Percent_2012', 'g11_Under18_Below_Poverty_Percent_2012', 'g12_Under18_Below_Poverty_Percent_2012', 'g9_Housholds_on_Food_stamps_with_Children_Under18_2012', 'g10_Housholds_on_Food_stamps_with_Children_Under18_2012', 'g11_Housholds_on_Food_stamps_with_Children_Under18_2012', 'g12_Housholds_on_Food_stamps_with_Children_Under18_2012', 'g9_Housholds_Pop_on_Food_Stamps_2012', 'g10_Housholds_Pop_on_Food_Stamps_2012', 'g11_Housholds_Pop_on_Food_Stamps_2012', 'g12_Housholds_Pop_on_Food_Stamps_2012', 'g9_Pop_BlackAA_2012', 'g10_Pop_BlackAA_2012', 'g11_Pop_BlackAA_2012', 'g12_Pop_BlackAA_2012', 'g9_Pop_White_2012', 'g10_Pop_White_2012', 'g11_Pop_White_2012', 'g12_Pop_White_2012', 'g9_Bt_18_24_percent_less_than_High_School_2012', 'g10_Bt_18_24_percent_less_than_High_School_2012', 'g11_Bt_18_24_percent_less_than_High_School_2012', 'g12_Bt_18_24_percent_less_than_High_School_2012', 'g9_Bt_18_24_percent_High_School_2012', 'g10_Bt_18_24_percent_High_School_2012', 'g11_Bt_18_24_percent_High_School_2012', 'g12_Bt_18_24_percent_High_School_2012', 'g9_Bt_18_24_percent_Some_College_or_AA_2012', 
'g10_Bt_18_24_percent_Some_College_or_AA_2012', 'g11_Bt_18_24_percent_Some_College_or_AA_2012', 'g12_Bt_18_24_percent_Some_College_or_AA_2012', 'g9_Bt_1824_percent_BA_or_Higher_2012', 'g10_Bt_1824_percent_BA_or_Higher_2012', 'g11_Bt_1824_percent_BA_or_Higher_2012', 'g12_Bt_1824_percent_BA_or_Higher_2012', 'g9_Over_25_percent_less_than_9th_grade_2012', 'g10_Over_25_percent_less_than_9th_grade_2012', 'g11_Over_25_percent_less_than_9th_grade_2012', 'g12_Over_25_percent_less_than_9th_grade_2012', 'g9_Over_25_percent_9th_12th_2012', 'g10_Over_25_percent_9th_12th_2012', 'g11_Over_25_percent_9th_12th_2012', 'g12_Over_25_percent_9th_12th_2012', 'g9_Over_25_percent_High_School_2012', 'g10_Over_25_percent_High_School_2012', 'g11_Over_25_percent_High_School_2012', 'g12_Over_25_percent_High_School_2012', 'g9_Over_25__percent_Some_College_No_Deg_2012', 'g10_Over_25__percent_Some_College_No_Deg_2012', 'g11_Over_25__percent_Some_College_No_Deg_2012', 'g12_Over_25__percent_Some_College_No_Deg_2012', 'g9_Over_25_percent_AA_2012', 'g10_Over_25_percent_AA_2012', 'g11_Over_25_percent_AA_2012', 'g12_Over_25_percent_AA_2012', 'g9_Over_25_percent_Bachelors_2012', 'g10_Over_25_percent_Bachelors_2012', 'g11_Over_25_percent_Bachelors_2012', 'g12_Over_25_percent_Bachelors_2012', 'g9_Over_25_percent_Graduate_or_Professionals_2012', 'g10_Over_25_percent_Graduate_or_Professionals_2012', 'g11_Over_25_percent_Graduate_or_Professionals_2012', 'g12_Over_25_percent_Graduate_or_Professionals_2012']
ml.replace_with_mean(df, neighborhood_cols)
summary = ml.summarize(df)
print summary.T
#ml.print_to_csv(summary.T, 'updated_summary_stats_vertical.csv')
ml.print_to_csv(df, 'data/imputed_data.csv')
#ml.print_to_csv(df, '/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/imputed_data.csv')
print "Done!"
#-------------------------------------------------------
if __name__ == '__main__':
dataset = "data/cohort1_all_school.csv"
#dataset = "data/cohort2_all_school.csv"
#dataset = "/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/cohort1_all_school.csv"
#df = summarize_data(dataset)
df = ml.read_data(dataset)
#clean_data(df, 'cohort1')
#clean_data(df, 'cohort2')
#non_dummy_data = 'data/predummy_data.csv'
#non_dummy_data = '/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/predummy_data.csv'
#deal_with_dummies(non_dummy_data)
clean_dataset = 'data/clean_data.csv'
#clean_dataset = '/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/clean_data.csv'
impute_data(clean_dataset, 'cohort1')
#impute_data(clean_dataset, 'cohort2')
| gpl-3.0 |
JohanComparat/pySU | spm/bin_SMF/create_table_completeness.py | 1 | 9615 | import astropy.io.fits as fits
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
from scipy.stats import scoreatpercentile as sc
survey = sys.argv[1]
z_min, z_max = 0., 1.6
imfs = ["Chabrier_ELODIE_", "Chabrier_MILES_", "Chabrier_STELIB_", "Kroupa_ELODIE_", "Kroupa_MILES_", "Kroupa_STELIB_", "Salpeter_ELODIE_", "Salpeter_MILES_", "Salpeter_STELIB_" ]
out_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'results')
#path_2_MAG_cat = os.path.join( os.environ['HOME'], 'SDSS', "dr14_specphot_gri.fits" )
#hd = fits.open(path_2_MAG_cat)
#path_2_sdss_cat = os.path.join( os.environ['HOME'], 'SDSS', '26', 'catalogs', "FireFly.fits" )
#path_2_eboss_cat = os.path.join( os.environ['HOME'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly.fits" )
path_2_sdss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', '26', 'catalogs', "FireFly.fits" )
path_2_eboss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly.fits" )
# OPENS THE CATALOGS
print("Loads catalog")
if survey =='deep2':
deep2_dir = os.path.join(os.environ['OBS_REPO'], 'DEEP2')
path_2_deep2_cat = os.path.join( deep2_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck13.spm.fits" )
catalog = fits.open(path_2_deep2_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'ZBEST', 'ZERR', 'CLASS', 'ZQUALITY'
if survey =='sdss':
catalog = fits.open(path_2_sdss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z', 'Z_ERR', 'CLASS', 'ZWARNING'
if survey =='boss':
catalog = fits.open(path_2_eboss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO'
IMF = imfs[0]
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
print(IMF, prf)
name, zflg_val, prefix = prf, 0., IMF
catalog_0 = (catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name]) & (catalog[class_name]=='GALAXY') & (catalog[zwarning]==zflg_val) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_zOk = catalog_0 & (catalog['SNR_ALL']>0)
converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.8 )
dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.4 )
#target_bits
program_names = n.array(list(set( catalog['PROGRAMNAME'] )))
program_names.sort()
sourcetypes = n.array(list(set( catalog['SOURCETYPE'] )))
sourcetypes.sort()
length = lambda selection : len(selection.nonzero()[0])
g = lambda key, s1, pcs = n.array([10., 25., 50., 75., 90. ]) : n.hstack(( length(s1), sc(catalog[key][s1], pcs) ))
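# g(key, s1) returns the sample size of selection s1 followed by the 10th,
# 25th, 50th, 75th and 90th percentiles of catalog[key] within that selection,
# e.g. g('SNR_ALL', sel_st(pg)) -> [N, p10, p25, p50, p75, p90].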
sel_pg = lambda pgr : (catalog_zOk) & (catalog['PROGRAMNAME']==pgr)
sel_st = lambda pgr : (catalog_zOk) & (catalog['SOURCETYPE']==pgr)
sel0_pg = lambda pgr : (catalog_0) & (catalog['PROGRAMNAME']==pgr)
sel0_st = lambda pgr : (catalog_0) & (catalog['SOURCETYPE']==pgr)
all_galaxies = []
tpps = []
for pg in sourcetypes:
n_targets = length( (catalog['SOURCETYPE']==pg))
n_galaxies = length( sel0_st(pg) )
all_galaxies.append(n_galaxies)
n_all = length( sel_st(pg)) *1.
n_1 = length( (sel_st(pg))&(converged) )
n_2 = length( (sel_st(pg))&(dex04) )
n_3 = length( (sel_st(pg))&(dex02) )
if n_all>0 :
out = n.array([
n_targets,
n_galaxies, n.round(n_galaxies*100./n_targets,1),
n_all , n.round(n_all*100./n_targets ,1),
n_1, n.round(n_1*100./n_all,1),
n_2, n.round(n_2*100./n_all,1),
n_3, n.round(n_3*100./n_all,1)
])
if n_all == 0 :
try :
out = n.array([
n_targets,
n_galaxies, n.round(n_galaxies*100./n_targets,1),
n_all , n.round(n_all*100./n_targets ,1),
n_1, 0.,
n_2, 0.,
n_3, 0.
])
except(ZeroDivisionError):
out = n.array([
n_targets,
n_galaxies, 0.,
n_all , 0.,
n_1, 0.,
n_2, 0.,
n_3, 0.
])
tpp = pg + " & " + " & ".join(n.array([ str(int(el)) for el in out]) ) + ' \\\\ \n'
print( tpp)
tpps.append(tpp)
all_galaxies = n.array(all_galaxies)
tpps = n.array(tpps)
ids = n.argsort(all_galaxies)[::-1]
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_comp_"+survey+"_snr_all_sourcetype_N_Nsnr_Nconv_Ndex04_Ndex02.tex")
f=open(out_file, 'w')
#f.write('source type & N & \multicolumn{c}{2}{N galaxies} && \multicolumn{c}{2}{SNR ALL$>0$} & \\multicolumn{c}{2}{frefly converged} & \multicolumn{c}{2}{$\sigma_{\log_M}<0.4$} & \multicolumn{c}{2}{$\sigma_{\log_M}<0.2$} \\\\ \n')
#f.write(' & & N & % & & N & % & N & % & N & % \\\\ \n')
for jj in ids :
f.write( tpps[jj] )
f.close()
sys.exit()
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_comp_"+survey+"_snr_all_sourcetype_N_Nsnr_Nconv_Ndex04_Ndex02.tex")
f=open(out_file, 'w')
f.write('source type & N & N galaxies & SNR ALL$>0$ & firefly converged & err$<0.4$ & err$<0.2$ \\\\')
for pg in sourcetypes:
f.write(pg)
out = n.array([
length( (catalog['SOURCETYPE']==pg)),
length( sel0_st(pg) ),
length( sel_st(pg) ),
length( (sel_st(pg))&(converged) ),
length( (sel_st(pg))&(dex04) ),
length( (sel_st(pg))&(dex02) )
])
tpp = "".join(n.array([ ' & '+str(el) for el in out]) )
print(pg, tpp)
f.write( tpp )
f.write(' \\\\ \n')
f.close()
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_comp_"+survey+"_snr_all_programname.tex")
f=open(out_file, 'w')
for pg in program_names:
f.write(pg)
tpp = str( g('SNR_ALL', sel_pg(pg)) )[1:-1]
print(pg, tpp)
f.write( tpp )
f.write(' \\\\ \n')
f.close()
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_comp_"+survey+"_snr_all_sourcetype.tex")
f=open(out_file, 'w')
for pg in sourcetypes:
f.write(pg)
tpp = str( g('SNR_ALL', sel_st(pg)) )[1:-1]
print(pg, tpp)
f.write( tpp )
f.write(' \\\\ \n')
f.close()
#converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
#dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.8 )
#dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < 0.4 )
#m_catalog = n.log10(catalog[prefix+'stellar_mass'])
#w_catalog = n.ones_like(catalog[prefix+'stellar_mass'])
#print(ld(catalog_zOk))
#return name + " & $"+ sld(converged)+"$ ("+str(n.round(ld(converged)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex04)+"$ ("+str(n.round(ld(dex04)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex02)+ "$ ("+str(n.round(ld(dex02)/ld(catalog_zOk)*100.,1))+r") \\\\"
##return catalog_sel, m_catalog, w_catalog
sys.exit()
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_deep2(deep2, 'ZBEST', 'ZQUALITY', prf, 2., IMF, o2=False)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(boss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(boss, 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO', prf, 0., IMF)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(sdss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', prf, 0., IMF)
f.write(l2w + " \n")
f.write('\\hline \n')
f.close()
#"""
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_2_r.tex")
f=open(out_file, 'w')
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_deep2(deep2, 'ZBEST', 'ZQUALITY', prf, 2., IMF, o2=True)
f.write(l2w + " \n")
f.close()
| cc0-1.0 |
ioam/holoviews | holoviews/core/data/pandas.py | 2 | 13516 | from __future__ import absolute_import
try:
import itertools.izip as zip
except ImportError:
pass
import numpy as np
import pandas as pd
from .interface import Interface, DataError
from ..dimension import dimension_name
from ..element import Element
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check, sorted_context
from .. import util
class PandasInterface(Interface):
types = (pd.DataFrame if pd else None,)
datatype = 'dataframe'
@classmethod
def dimension_type(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
idx = list(dataset.data.columns).index(name)
return dataset.data.dtypes[idx].type
@classmethod
def init(cls, eltype, data, kdims, vdims):
element_params = eltype.params()
kdim_param = element_params['kdims']
vdim_param = element_params['vdims']
if util.is_series(data):
data = data.to_frame()
if util.is_dataframe(data):
ncols = len(data.columns)
index_names = data.index.names if isinstance(data, pd.DataFrame) else [data.index.name]
if index_names == [None]:
index_names = ['index']
if eltype._auto_indexable_1d and ncols == 1 and kdims is None:
kdims = list(index_names)
if isinstance(kdim_param.bounds[1], int):
ndim = min([kdim_param.bounds[1], len(kdim_param.default)])
else:
ndim = None
nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None
if kdims and vdims is None:
vdims = [c for c in data.columns if c not in kdims]
elif vdims and kdims is None:
kdims = [c for c in data.columns if c not in vdims][:ndim]
elif kdims is None:
kdims = list(data.columns[:ndim])
if vdims is None:
vdims = [d for d in data.columns[ndim:((ndim+nvdim) if nvdim else None)]
if d not in kdims]
elif kdims == [] and vdims is None:
vdims = list(data.columns[:nvdim if nvdim else None])
# Handle reset of index if kdims reference index by name
for kd in kdims:
kd = dimension_name(kd)
if kd in data.columns:
continue
if any(kd == ('index' if name is None else name)
for name in index_names):
data = data.reset_index()
break
if any(isinstance(d, (np.int64, int)) for d in kdims+vdims):
raise DataError("pandas DataFrame column names used as dimensions "
"must be strings not integers.", cls)
if kdims:
kdim = dimension_name(kdims[0])
if eltype._auto_indexable_1d and ncols == 1 and kdim not in data.columns:
data = data.copy()
data.insert(0, kdim, np.arange(len(data)))
for d in kdims+vdims:
d = dimension_name(d)
if len([c for c in data.columns if c == d]) > 1:
raise DataError('Dimensions may not reference duplicated DataFrame '
'columns (found duplicate %r columns). If you want to plot '
'a column against itself simply declare two dimensions '
'with the same name. '% d, cls)
else:
# Check if data is of non-numeric type
# Then use defined data type
kdims = kdims if kdims else kdim_param.default
vdims = vdims if vdims else vdim_param.default
columns = [dimension_name(d) for d in kdims+vdims]
if isinstance(data, dict) and all(c in data for c in columns):
data = cyODict(((d, data[d]) for d in columns))
elif isinstance(data, list) and len(data) == 0:
data = {c: np.array([]) for c in columns}
elif isinstance(data, (list, dict)) and data in ([], {}):
data = None
elif (isinstance(data, dict) and not all(d in data for d in columns) and
not any(isinstance(v, np.ndarray) for v in data.values())):
column_data = sorted(data.items())
k, v = column_data[0]
if len(util.wrap_tuple(k)) != len(kdims) or len(util.wrap_tuple(v)) != len(vdims):
raise ValueError("Dictionary data not understood, should contain a column "
"per dimension or a mapping between key and value dimension "
"values.")
column_data = zip(*((util.wrap_tuple(k)+util.wrap_tuple(v))
for k, v in column_data))
data = cyODict(((c, col) for c, col in zip(columns, column_data)))
elif isinstance(data, np.ndarray):
if data.ndim == 1:
if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1:
data = (np.arange(len(data)), data)
else:
data = np.atleast_2d(data).T
else:
data = tuple(data[:, i] for i in range(data.shape[1]))
if isinstance(data, tuple):
data = [np.array(d) if not isinstance(d, np.ndarray) else d for d in data]
if not cls.expanded(data):
raise ValueError('PandasInterface expects data to be of uniform shape.')
data = pd.DataFrame(dict(zip(columns, data)), columns=columns)
elif ((isinstance(data, dict) and any(c not in data for c in columns)) or
(isinstance(data, list) and any(isinstance(d, dict) and c not in d for d in data for c in columns))):
raise ValueError('PandasInterface could not find specified dimensions in the data.')
else:
data = pd.DataFrame(data, columns=columns)
return data, {'kdims':kdims, 'vdims':vdims}, {}
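# Illustrative note (not part of the original class): ``init`` coerces dicts,
# tuples of arrays, 1D/2D ndarrays or DataFrames into a DataFrame keyed by the
# declared dimensions. For example (hypothetical element type and data):
#
#     PandasInterface.init(hv.Scatter, {'x': [0, 1], 'y': [2, 3]}, ['x'], ['y'])
#
# returns a two-column DataFrame plus the resolved kdims/vdims.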
@classmethod
def isscalar(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
return len(dataset.data[name].unique()) == 1
@classmethod
def validate(cls, dataset, vdims=True):
dim_types = 'all' if vdims else 'key'
dimensions = dataset.dimensions(dim_types, label='name')
not_found = [d for d in dimensions if d not in dataset.data.columns]
if not_found:
raise DataError("Supplied data does not contain specified "
"dimensions, the following dimensions were "
"not found: %s" % repr(not_found), cls)
@classmethod
def range(cls, dataset, dimension):
column = dataset.data[dataset.get_dimension(dimension, strict=True).name]
if column.dtype.kind == 'O':
if (not isinstance(dataset.data, pd.DataFrame) or
util.LooseVersion(pd.__version__) < '0.17.0'):
column = column.sort(inplace=False)
else:
column = column.sort_values()
column = column[~column.isin([None])]
if not len(column):
return np.NaN, np.NaN
return column.iloc[0], column.iloc[-1]
else:
return (column.min(), column.max())
@classmethod
def concat(cls, datasets, dimensions, vdims):
dataframes = []
for key, ds in datasets:
data = ds.data.copy()
for d, k in zip(dimensions, key):
data[d.name] = k
dataframes.append(data)
kwargs = dict(sort=False) if util.pandas_version >= '0.23.0' else {}
return pd.concat(dataframes, **kwargs)
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
index_dims = [dataset.get_dimension(d, strict=True) for d in dimensions]
element_dims = [kdim for kdim in dataset.kdims
if kdim not in index_dims]
group_kwargs = {}
if group_type != 'raw' and issubclass(group_type, Element):
group_kwargs = dict(util.get_param_values(dataset),
kdims=element_dims)
group_kwargs.update(kwargs)
group_by = [d.name for d in index_dims]
data = [(k, group_type(v, **group_kwargs)) for k, v in
dataset.data.groupby(group_by, sort=False)]
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
return container_type(data, kdims=index_dims)
else:
return container_type(data)
@classmethod
def aggregate(cls, dataset, dimensions, function, **kwargs):
data = dataset.data
cols = [d.name for d in dataset.kdims if d in dimensions]
vdims = dataset.dimensions('value', label='name')
reindexed = data[cols+vdims]
if function in [np.std, np.var]:
# Fix for consistency with other backend
# pandas uses ddof=1 for std and var
fn = lambda x: function(x, ddof=0)
else:
fn = function
if len(dimensions):
grouped = reindexed.groupby(cols, sort=False)
df = grouped.aggregate(fn, **kwargs).reset_index()
else:
agg = reindexed.apply(fn, **kwargs)
data = dict(((col, [v]) for col, v in zip(agg.index, agg.values)))
df = pd.DataFrame(data, columns=list(agg.index))
dropped = []
for vd in vdims:
if vd not in df.columns:
dropped.append(vd)
return df, dropped
@classmethod
def unpack_scalar(cls, dataset, data):
"""
Given a dataset object and data in the appropriate format for
the interface, return a simple scalar.
"""
if len(data) != 1 or len(data.columns) > 1:
return data
return data.iat[0,0]
@classmethod
def reindex(cls, dataset, kdims=None, vdims=None):
# DataFrame based tables don't need to be reindexed
return dataset.data
@classmethod
def redim(cls, dataset, dimensions):
column_renames = {k: v.name for k, v in dimensions.items()}
return dataset.data.rename(columns=column_renames)
@classmethod
def sort(cls, dataset, by=[], reverse=False):
import pandas as pd
cols = [dataset.get_dimension(d, strict=True).name for d in by]
if (not isinstance(dataset.data, pd.DataFrame) or
util.LooseVersion(pd.__version__) < '0.17.0'):
return dataset.data.sort(columns=cols, ascending=not reverse)
return dataset.data.sort_values(by=cols, ascending=not reverse)
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
df = dataset.data
if selection_mask is None:
selection_mask = cls.select_mask(dataset, selection)
indexed = cls.indexed(dataset, selection)
df = df.iloc[selection_mask]
if indexed and len(df) == 1 and len(dataset.vdims) == 1:
return df[dataset.vdims[0].name].iloc[0]
return df
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True):
dim = dataset.get_dimension(dim, strict=True)
data = dataset.data[dim.name]
if not expanded:
return data.unique()
return data.values
@classmethod
def sample(cls, dataset, samples=[]):
data = dataset.data
mask = False
for sample in samples:
sample_mask = True
if np.isscalar(sample): sample = [sample]
for i, v in enumerate(sample):
sample_mask = np.logical_and(sample_mask, data.iloc[:, i]==v)
mask |= sample_mask
return data[mask]
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
data = dataset.data.copy()
if dimension.name not in data:
data.insert(dim_pos, dimension.name, values)
return data
@classmethod
def as_dframe(cls, dataset):
"""
Returns the data of a Dataset as a dataframe avoiding copying
if it already a dataframe type.
"""
if issubclass(dataset.interface, PandasInterface):
return dataset.data
else:
return dataset.dframe()
@classmethod
def dframe(cls, dataset, dimensions):
if dimensions:
return dataset.data[dimensions]
else:
return dataset.data.copy()
@classmethod
def iloc(cls, dataset, index):
rows, cols = index
scalar = False
columns = list(dataset.data.columns)
if isinstance(cols, slice):
cols = [d.name for d in dataset.dimensions()][cols]
elif np.isscalar(cols):
scalar = np.isscalar(rows)
cols = [dataset.get_dimension(cols).name]
else:
cols = [dataset.get_dimension(d).name for d in index[1]]
cols = [columns.index(c) for c in cols]
if np.isscalar(rows):
rows = [rows]
if scalar:
return dataset.data.iloc[rows[0], cols[0]]
return dataset.data.iloc[rows, cols]
Interface.register(PandasInterface)
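# A minimal usage sketch (an illustration, not part of the original module):
# PandasInterface is normally exercised indirectly through holoviews elements
# such as hv.Dataset; kdims/vdims resolution, range() and iloc dispatch to the
# classmethods above. Assumes holoviews and pandas are importable.
#
#     import pandas as pd
#     import holoviews as hv
#     df = pd.DataFrame({'x': [0, 1, 2], 'y': [10.0, 20.0, 30.0]})
#     ds = hv.Dataset(df, kdims=['x'], vdims=['y'])
#     ds.range('y')     # -> (10.0, 30.0), via PandasInterface.range
#     ds.iloc[0, 1]     # -> 10.0, via PandasInterface.iloc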
| bsd-3-clause |
ichmonkey/graph | band.py | 1 | 3690 | """
You can use the proper typesetting unicode minus (see
http://en.wikipedia.org/wiki/Plus_sign#Plus_sign) or the ASCII hypen
for minus, which some people prefer. The matplotlibrc param
ax1es.unicode_minus controls the default behavior.
The default is to use the unicode minus
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.gridspec as gridspec
import sys,re,string
def readData(inputFile):
Data=[]
f=open(inputFile)
lines=f.readlines()
nSet=len(lines)/2
#~ print nSet
eFermi=[6.207862,5.642064,5.013502]
i=0
for i in range(nSet):
label='band'
X=[float(x) for x in lines[i*2+0].split()]
Y=[float(y)-eFermi[i] for y in lines[i*2+1].split()]
Data.append([label,X,Y])
i+=1
f.close()
return Data
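# Expected layout of band.dat (a sketch inferred from readData above and
# readBand below): each data set occupies two whitespace-separated lines,
# first the k-point distances, then the corresponding eigenvalues, e.g.
#
#     0.000 0.052 0.104 ...
#     -3.21 -3.19 -3.10 ...
#
# readData shifts the energies by the hard-coded Fermi levels in eFermi.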
def draw(file='band.dat'):
titleFontSize=18
markerSize=11
lineWidth=3
matplotlib.rcParams['axes.unicode_minus'] = False
fig = plt.figure(figsize=(9.5,7))
#~ plt.subplots_adjust(top=0.92,bottom=0.08,left =0.1,right =0.95,hspace=0.4,wspace=0.3)
#~ band1
#~ gs1=gridspec.GridSpec(2,2)
#~ gs1.update(left=0.1, right=0.47, wspace=0.0)
ax2 = fig.add_subplot(111)
ax2.tick_params(direction='in', labelleft='on',labelright='off')
Data=readData(file)
lineSet=['bo','ro','go']
i=0
for data in Data:
labelt=data[0]
X=data[1]
Y=data[2]
ax2.plot(X,Y,'ko',label=labelt,markersize=5,linewidth=lineWidth,markeredgewidth =0)
i+=1
ax2.yaxis.set_major_locator(MultipleLocator(1))
ax2.yaxis.set_major_formatter(FormatStrFormatter('%d'))
ax2.yaxis.set_minor_locator(MultipleLocator(0.1))
#~ ax2.set_ylim(-7,2)
ax2.set_ylabel('E (eV)',size=15)
plt.show()
def readBand(file='EIGENVAL'):
fw=open('bandOUT.txt','w')
fdat=open('band.dat','w')
eFermi=0
k=(0,0,0)
kold=(0,0,0)
dk=0.0
kp=0.0
K=[]
En=[]
f=open(file,'r')
for i in range(7):
f.readline()
for line in f.readlines():
m= re.match(r'\s+(-*[0-9].[0-9]+E[+-][0-9]+)\s+(-*[0-9].[0-9]+E[+-][0-9]+)\s+(-*[0-9].[0-9]+E[+-][0-9]+)',line)
if m :
"""
k point distance calculation
"""
k=( float(m.group(1)) , float(m.group(2)) , float(m.group(3)) )
if dk < 1000 :
dk=pow(pow(k[0]-kold[0],2)+pow(k[1]-kold[1],2)+pow(k[2]-kold[2],2),0.5)
if dk>0.2:
dk=0
else:
dk=0
kold=k
kp=kp+dk
#print "matched"
#~ if len(band)>0:
#~ bands.append(band)
#~ band=[]
else:
if len(line)>2:
fw.write(str(kp)+'\t'+line[0:len(line)-2]+'\n')
K.append(str(kp))
En.append(str((float(line.split()[1])-eFermi)))
#~ print str(kp)+'\t'+line[0:len(line)-2].strip()
for i in range(len(K)):
fw.write(str(K[i])+'\t'+En[i]+'\n')
for k in K:
fdat.write(k+' ')
fdat.write('\n')
for en in En:
fdat.write(en+' ')
f.close()
fw.close()
fdat.close()
readBand('EIGENVAL')
draw() | gpl-2.0 |
timothy1191xa/project-epsilon-1 | code/utils/tests/test_organize_behavior_data.py | 4 | 2554 | """ Tests for organize_behavior_data function in glm module
This checks the organize_behavior_data function.
Tests run on our dataset (ds005) because the other dataset does not include behavioral data.
Run at the tests directory with:
nosetests code/utils/tests/test_organize_behavior_data.py
"""
# Loading modules.
import numpy as np
import numpy.linalg as npl
import nibabel as nib
import pandas as pd
import os
import sys
from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
from organize_behavior_data import *
data_location=os.path.join(os.path.dirname(__file__), '../../../data/ds005/')
def test_load_in_dataframe():
""" tests whether the behavior data is loadede in data frame.
Testing on subject 2.
"""
run1 = pd.read_table(data_location+'sub002/behav/task001_run001/behavdata.txt')
run2 = pd.read_table(data_location+'sub002/behav/task001_run002/behavdata.txt')
run3 = pd.read_table(data_location+'sub002/behav/task001_run003/behavdata.txt')
#append all the runs in one pandas data frame
r=run1.append(run2)
run_total=r.append(run3)
run_total_array=run_total.as_matrix()
test_array=load_in_dataframe(2).as_matrix()
assert_array_equal(run_total_array, test_array)
def test_load_behav_txt():
""" tests whether the function is properly taking out the errors in the subject's
responses. (COMBINED RUNS)
Testing on subject 2.
"""
fixedshape = (248,7)
behav1=np.loadtxt(data_location+'sub002/behav/task001_run001/behavdata.txt',skiprows=1)
behav2=np.loadtxt(data_location+'sub002/behav/task001_run002/behavdata.txt',skiprows=1)
behav3=np.loadtxt(data_location+'sub002/behav/task001_run003/behavdata.txt',skiprows=1)
#concatenate them to be 1
behav=np.concatenate((behav1,behav2,behav3),axis=0)
#check if the shape is same
assert_equal(load_behav_txt(2).shape,fixedshape)
#check if there is any error is not taken out yet
assert_equal(np.where(load_behav_txt(2)[:,5]==-1),np.array([]).reshape(1,0))
def test_load_behav_text_one():
""" tests whether the function is properly taking out the errors in the subject's
responses. (SINGLE RUN)
Testing on subject 2 run001.
"""
fixedshape = (84,7)
behav1=np.loadtxt(data_location+'sub002/behav/task001_run001/behavdata.txt',skiprows=1)
#check if the shape is same
assert_equal(load_behav_text_one(2,1).shape,fixedshape)
#check if there is any error is not taken out yet
assert_equal(np.where(load_behav_text_one(2,1)[:,5]==-1),np.array([]).reshape(1,0))
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py | 84 | 17929 | # Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.special import gammaln
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm
from sklearn.mixture.bayesian_mixture import _log_wishart_norm
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture.tests.test_gaussian_mixture import RandomData
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_greater_equal, ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
PRIOR_TYPE = ['dirichlet_process', 'dirichlet_distribution']
def test_log_dirichlet_norm():
rng = np.random.RandomState(0)
weight_concentration = rng.rand(2)
expected_norm = (gammaln(np.sum(weight_concentration)) -
np.sum(gammaln(weight_concentration)))
predected_norm = _log_dirichlet_norm(weight_concentration)
assert_almost_equal(expected_norm, predected_norm)
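# The quantity checked above is the log of the Dirichlet normalizing constant,
#     log Gamma(sum_k alpha_k) - sum_k log Gamma(alpha_k),
# which is exactly what _log_dirichlet_norm is expected to return.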
def test_log_wishart_norm():
rng = np.random.RandomState(0)
n_components, n_features = 5, 2
degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
expected_norm = np.empty(5)
for k, (degrees_of_freedom_k, log_det_k) in enumerate(
zip(degrees_of_freedom, log_det_precisions_chol)):
expected_norm[k] = -(
degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
np.sum(gammaln(.5 * (degrees_of_freedom_k -
np.arange(0, n_features)[:, np.newaxis])), 0))
predected_norm = _log_wishart_norm(degrees_of_freedom,
log_det_precisions_chol, n_features)
assert_almost_equal(expected_norm, predected_norm)
def test_bayesian_mixture_covariance_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
covariance_type = 'bad_covariance_type'
bgmm = BayesianGaussianMixture(covariance_type=covariance_type,
random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type, bgmm.fit, X)
def test_bayesian_mixture_weight_concentration_prior_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
bad_prior_type = 'bad_prior_type'
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=bad_prior_type, random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'weight_concentration_prior_type':"
" %s 'weight_concentration_prior_type' should be in "
"['dirichlet_process', 'dirichlet_distribution']"
% bad_prior_type, bgmm.fit, X)
def test_bayesian_mixture_weights_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 5, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of weight_concentration_prior
bad_weight_concentration_prior_ = 0.
bgmm = BayesianGaussianMixture(
weight_concentration_prior=bad_weight_concentration_prior_,
random_state=0)
assert_raise_message(ValueError,
"The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% bad_weight_concentration_prior_,
bgmm.fit, X)
# Check correct init for a given value of weight_concentration_prior
weight_concentration_prior = rng.rand()
bgmm = BayesianGaussianMixture(
weight_concentration_prior=weight_concentration_prior,
random_state=rng).fit(X)
assert_almost_equal(weight_concentration_prior,
bgmm.weight_concentration_prior_)
# Check correct init for the default value of weight_concentration_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(1. / n_components, bgmm.weight_concentration_prior_)
def test_bayesian_mixture_means_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 3, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of mean_precision_prior
bad_mean_precision_prior_ = 0.
bgmm = BayesianGaussianMixture(
mean_precision_prior=bad_mean_precision_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% bad_mean_precision_prior_,
bgmm.fit, X)
# Check correct init for a given value of mean_precision_prior
mean_precision_prior = rng.rand()
bgmm = BayesianGaussianMixture(
mean_precision_prior=mean_precision_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
# Check correct init for the default value of mean_precision_prior
bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
assert_almost_equal(1., bgmm.mean_precision_prior_)
# Check raise message for a bad shape of mean_prior
mean_prior = rng.rand(n_features + 1)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
bgmm.fit, X)
# Check correct init for a given value of mean_prior
mean_prior = rng.rand(n_features)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_prior, bgmm.mean_prior_)
    # Check correct init for the default value of mean_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
def test_bayesian_mixture_precisions_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of degrees_of_freedom_prior
bad_degrees_of_freedom_prior_ = n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=bad_degrees_of_freedom_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'degrees_of_freedom_prior' should be "
"greater than %d, but got %.3f."
% (n_features - 1, bad_degrees_of_freedom_prior_),
bgmm.fit, X)
# Check correct init for a given value of degrees_of_freedom_prior
degrees_of_freedom_prior = rng.rand() + n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior,
bgmm.degrees_of_freedom_prior_)
# Check correct init for the default value of degrees_of_freedom_prior
degrees_of_freedom_prior_default = n_features
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior_default,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior_default,
bgmm.degrees_of_freedom_prior_)
# Check correct init for a given value of covariance_prior
covariance_prior = {
'full': np.cov(X.T, bias=1) + 10,
'tied': np.cov(X.T, bias=1) + 5,
'diag': np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
'spherical': rng.rand()}
bgmm = BayesianGaussianMixture(random_state=rng)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.covariance_prior = covariance_prior[cov_type]
bgmm.fit(X)
assert_almost_equal(covariance_prior[cov_type],
bgmm.covariance_prior_)
# Check raise message for a bad spherical value of covariance_prior
bad_covariance_prior_ = -1.
bgmm = BayesianGaussianMixture(covariance_type='spherical',
covariance_prior=bad_covariance_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% bad_covariance_prior_,
bgmm.fit, X)
# Check correct init for the default value of covariance_prior
covariance_prior_default = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()}
bgmm = BayesianGaussianMixture(random_state=0)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.fit(X)
assert_almost_equal(covariance_prior_default[cov_type],
bgmm.covariance_prior_)
def test_bayesian_mixture_check_is_fitted():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
# Check raise message
bgmm = BayesianGaussianMixture(random_state=rng)
X = rng.rand(n_samples, n_features)
assert_raise_message(ValueError,
'This BayesianGaussianMixture instance is not '
'fitted yet.', bgmm.score, X)
def test_bayesian_mixture_weights():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Case Dirichlet distribution for the weight concentration prior type
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=3, random_state=rng).fit(X)
expected_weights = (bgmm.weight_concentration_ /
np.sum(bgmm.weight_concentration_))
assert_almost_equal(expected_weights, bgmm.weights_)
assert_almost_equal(np.sum(bgmm.weights_), 1.0)
# Case Dirichlet process for the weight concentration prior type
dpgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=3, random_state=rng).fit(X)
weight_dirichlet_sum = (dpgmm.weight_concentration_[0] +
dpgmm.weight_concentration_[1])
tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
expected_weights = (dpgmm.weight_concentration_[0] / weight_dirichlet_sum *
np.hstack((1, np.cumprod(tmp[:-1]))))
expected_weights /= np.sum(expected_weights)
assert_almost_equal(expected_weights, dpgmm.weights_)
assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
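# The Dirichlet-process branch above follows the truncated stick-breaking
# construction: with v_k = a_k / (a_k + b_k), where a_k and b_k are the two
# arrays in weight_concentration_, the mixture weights are
#     w_k = v_k * prod_{j<k} (1 - v_j),
# renormalised to sum to one, which is what expected_weights recomputes.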
@ignore_warnings(category=ConvergenceWarning)
def test_monotonic_likelihood():
    # We check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=20)
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type=covar_type,
warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
current_lower_bound = -np.infty
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_lower_bound = current_lower_bound
current_lower_bound = bgmm.fit(X).lower_bound_
assert_greater_equal(current_lower_bound, prev_lower_bound)
if bgmm.converged_:
break
assert(bgmm.converged_)
def test_compare_covar_type():
    # We can compare the 'full' covariances with the other covariance types if
    # we apply one iteration of the M-step (done during _initialize_parameters).
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
X = rand_data.X['full']
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='full',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
full_covariances = (
bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis])
# Check tied_covariance = mean(full_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='tied',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
# Check diag_covariance = diag(full_covariances)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='diag',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
diag_covariances = (bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis])
assert_almost_equal(diag_covariances,
np.array([np.diag(cov)
for cov in full_covariances]))
# Check spherical_covariance = np.mean(diag_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='spherical',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(
spherical_covariances, np.mean(diag_covariances, 1))
@ignore_warnings(category=ConvergenceWarning)
def test_check_covariance_precision():
# We check that the dot product of the covariance and the precision
# matrices is identity.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components, n_features = 2 * rand_data.n_components, 2
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(n_components=n_components,
max_iter=100, random_state=rng, tol=1e-3,
reg_covar=0)
for covar_type in COVARIANCE_TYPE:
bgmm.covariance_type = covar_type
bgmm.fit(rand_data.X[covar_type])
if covar_type == 'full':
for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
assert_almost_equal(np.dot(covar, precision),
np.eye(n_features))
elif covar_type == 'tied':
assert_almost_equal(np.dot(bgmm.covariances_, bgmm.precisions_),
np.eye(n_features))
elif covar_type == 'diag':
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones((n_components, n_features)))
else:
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones(n_components))
@ignore_warnings(category=ConvergenceWarning)
def test_invariant_translation():
    # We check here that adding a constant to the data changes the parameters
    # of the mixture accordingly
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=100)
n_components = 2 * rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm1 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X)
bgmm2 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X + 100)
assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)
assert_almost_equal(bgmm1.weights_, bgmm2.weights_)
assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)
| mit |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_ps.py | 69 | 50262 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import division
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import gettempdir
from cStringIO import StringIO
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform
import numpy as npy
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
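# Note on traversal order (an observation, not a behaviour change): the keys
# are sorted lexicographically and then reversed, so the b-series is tried
# before the a-series and 'b10' sorts between 'b1' and 'b2'. The function
# returns the first size that fits in that order, not necessarily the tightest
# fit; e.g. _get_papertype(8.0, 10.0) yields 'b4' here rather than the smaller 'a4'.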
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
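# Illustration (not part of the original module): parentheses, quotes and
# backslashes are escaped, and non-printable characters become \ooo octal
# escapes, so the result is safe inside a PostScript string literal; e.g.
# quote_ps_string("f(x)") produces the characters  f\(x\)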
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or numerix arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return npy.alltrue(npy.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
        Although PostScript itself is dpi independent, we need to
        inform the image code about a requested dpi to generate
        high-res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
if rcParams['path.simplify']:
self.simplify = (width * imagedpi, height * imagedpi)
else:
self.simplify = None
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self.hatch = None
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def set_hatch(self, hatch):
"""
hatch can be one of:
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
X - crossed diagonal
        Letters can be combined, in which case all the specified
        hatchings are done.
        If the same letter repeats, it increases the density of hatching
        in that direction.
"""
hatches = {'horiz':0, 'vert':0, 'diag1':0, 'diag2':0}
for letter in hatch:
if (letter == '/'): hatches['diag2'] += 1
elif (letter == '\\'): hatches['diag1'] += 1
elif (letter == '|'): hatches['vert'] += 1
elif (letter == '-'): hatches['horiz'] += 1
elif (letter == '+'):
hatches['horiz'] += 1
hatches['vert'] += 1
elif (letter.lower() == 'x'):
hatches['diag1'] += 1
hatches['diag2'] += 1
def do_hatch(angle, density):
if (density == 0): return ""
return """\
gsave
eoclip %s rotate 0.0 0.0 0.0 0.0 setrgbcolor 0 setlinewidth
/hatchgap %d def
pathbbox /hatchb exch def /hatchr exch def /hatcht exch def /hatchl exch def
hatchl cvi hatchgap idiv hatchgap mul
hatchgap
hatchr cvi hatchgap idiv hatchgap mul
{hatcht m 0 hatchb hatcht sub r }
for
stroke
grestore
""" % (angle, 12/density)
self._pswriter.write("gsave\n")
self._pswriter.write(do_hatch(90, hatches['horiz']))
self._pswriter.write(do_hatch(0, hatches['vert']))
self._pswriter.write(do_hatch(45, hatches['diag1']))
self._pswriter.write(do_hatch(-45, hatches['diag2']))
self._pswriter.write("grestore\n")
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
l,b,r,t = texmanager.get_ps_bbox(s, fontsize)
w = (r-l)
h = (t-b)
# TODO: We need a way to get a good baseline from
# text.usetex
return w, h, 0
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm')
font = self.afmfontd.get(fname)
if font is None:
font = AFM(file(findfont(prop, fontext='afm')))
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
im.flipud_out()
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
hexlines = '\n'.join(self._hex_lines(bits))
xscale, yscale = (
w/self.image_magnification, h/self.image_magnification)
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, simplify=None):
path = transform.transform_path(path)
ps = []
last_points = None
for points, code in path.iter_segments(simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
last_points = points
ps = "\n".join(ps)
return ps
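    # Illustration (not part of the original module): a closed unit square
    # path under the identity transform comes out as the PostScript fragment
    #     0 0 m
    #     1 0 l
    #     1 1 l
    #     0 1 l
    #     cl
    # which the callers wrap with gsave/clip/fill/stroke as needed.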
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
ps = self._convert_path(path, transform, self.simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
ps_cmd.append(self._convert_path(marker_path, marker_trans))
if rgbFace:
ps_cmd.extend(['gsave', ps_color, 'fill', 'grestore'])
ps_cmd.extend(['stroke', 'grestore', '} bind def'])
tpath = trans.transform_path(path)
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_codes, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
corr = 0#w/2*(fontsize-10)/10
pos = _nums_to_str(x-corr, y)
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
            return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif isinstance(s, unicode):
return self.draw_unicode(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
font = self._get_font_afm(prop)
l,b,w,h = font.get_str_bbox(s)
fontsize = prop.get_size_in_points()
l *= 0.001*fontsize
b *= 0.001*fontsize
w *= 0.001*fontsize
h *= 0.001*fontsize
if angle==90: l,b = -b, l # todo generalize for arb rotations
pos = _nums_to_str(x-l, y-b)
thetext = '(%s)' % s
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
rotate = '%1.1f rotate' % angle
setcolor = '%1.3f %1.3f %1.3f setrgbcolor' % gc.get_rgb()[:3]
#h = 0
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(pos)s moveto
%(rotate)s
%(thetext)s
%(setcolor)s
show
grestore
""" % locals()
self._draw_ps(ps, gc, None)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
write("%s m\n"%_nums_to_str(x,y))
if angle:
write("gsave\n")
write("%s rotate\n"%_num_to_str(angle))
descent = font.get_descent() / 64.0
if descent:
write("0 %s rmoveto\n"%_num_to_str(descent))
write("(%s) show\n"%quote_ps_string(s))
if angle:
write("grestore\n")
def new_gc(self):
return GraphicsContextPS()
def draw_unicode(self, gc, x, y, s, prop, angle):
"""draw a unicode string. ps doesn't have unicode support, so
we have to do this the hard way
"""
if rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript sniplet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0.0 and
(len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0))
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\ngrestore\n")
else:
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
hatch = gc.get_hatch()
if hatch:
self.set_hatch(hatch)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPS(thisFig)
manager = FigureManagerPS(canvas, num)
return manager
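# A minimal usage sketch (illustrative only; assumes the backend is selected
# before pyplot is imported):
#
#     import matplotlib
#     matplotlib.use('PS')                  # select this backend
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.savefig('figure.eps')             # routed to FigureCanvasPS.print_eps
#     plt.savefig('figure.ps')              # routed to FigureCanvasPS.print_ps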
class FigureCanvasPS(FigureCanvasBase):
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.get("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.keys() )) )
orientation = kwargs.get("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.get("dpi", 72)
facecolor = kwargs.get("facecolor", "w")
edgecolor = kwargs.get("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
elif is_writable_file_like(outfile):
title = None
tmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest())
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
fh = file(tmpfile, 'w')
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the PostScript headers
if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
else: print >>fh, "%!PS-Adobe-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%Orientation: " + orientation
if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
if not isEPSF: print >>fh, "%%Pages: 1"
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
if not rcParams['ps.useafm']:
Ndict += len(renderer.used_characters)
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
if not rcParams['ps.useafm']:
for font_filename, chars in renderer.used_characters.values():
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fonttype = rcParams['ps.fonttype']
convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids)
print >>fh, "end"
print >>fh, "%%EndProlog"
if not isEPSF: print >>fh, "%%Page: 1 1"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
if rotation: print >>fh, "%d rotate"%rotation
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
if not isEPSF: print >>fh, "%%EOF"
fh.close()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
fh = file(tmpfile)
print >>outfile, fh.read()
else:
shutil.move(tmpfile, outfile)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
# write to a temp file, we'll move it to outfile when done
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
fh = file(tmpfile, 'w')
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the Encapsulated PostScript headers
print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
print >>fh, "end"
print >>fh, "%%EndProlog"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
fh.close()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
convert_psfrags(tmpfile, renderer.psfrag, font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if isinstance(outfile, file):
fh = file(tmpfile)
print >>outfile, fh.read()
else: shutil.move(tmpfile, outfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
latexh = file(latexfile, 'w')
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s)
except UnicodeEncodeError, err:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
latexh.close()
# the split drive part of the command is necessary for windows users with
# multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
paper = '-sPAPERSIZE=%s'% ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=pswrite %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, paper, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode -sPAPERSIZE=%s "%s" "%s" > "%s"'% \
(ptype, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal; users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, npy.ceil(r), npy.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox):
"""
Convert the postscript to encapsulated postscript.
"""
bbox_info = get_bbox(tmpfile, bbox)
epsfile = tmpfile + '.eps'
epsh = file(epsfile, 'w')
tmph = file(tmpfile)
line = tmph.readline()
# Modify the header:
while line:
if line.startswith('%!PS'):
print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0"
print >>epsh, bbox_info
elif line.startswith('%%EndComments'):
epsh.write(line)
print >>epsh, '%%BeginProlog'
print >>epsh, 'save'
print >>epsh, 'countdictstack'
print >>epsh, 'mark'
print >>epsh, 'newpath'
print >>epsh, '/showpage {} def'
print >>epsh, '/setpagedevice {pop} def'
print >>epsh, '%%EndProlog'
print >>epsh, '%%Page 1 1'
break
elif line.startswith('%%Bound') \
or line.startswith('%%HiResBound') \
or line.startswith('%%Pages'):
pass
else:
epsh.write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith('%%Trailer'):
print >>epsh, '%%Trailer'
print >>epsh, 'cleartomark'
print >>epsh, 'countdictstack'
print >>epsh, 'exch sub { end } repeat'
print >>epsh, 'restore'
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
else:
epsh.write(line)
line = tmph.readline()
tmph.close()
epsh.close()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
| agpl-3.0 |
nvoron23/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
NunoEdgarGub1/ipython_extensions | extensions/retina.py | 4 | 2313 | """
Enable Retina (2x) PNG figures with matplotlib
Usage: %load_ext retina
"""
import struct
from base64 import encodestring
from io import BytesIO
def pngxy(data):
"""read the width/height from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
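# Worked example (illustrative): for a 100x50 PNG the 8 bytes that follow the
# b'IHDR' chunk tag are b'\x00\x00\x00d\x00\x00\x002' (big-endian width then
# height), so pngxy(data) returns (100, 50).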
def print_figure(fig, fmt='png', dpi=None):
"""Convert a figure to svg or png for inline display."""
import matplotlib
fc = fig.get_facecolor()
ec = fig.get_edgecolor()
bytes_io = BytesIO()
dpi = dpi or matplotlib.rcParams['savefig.dpi']
fig.canvas.print_figure(bytes_io, format=fmt, dpi=dpi,
bbox_inches='tight',
facecolor=fc, edgecolor=ec,
)
data = bytes_io.getvalue()
return data
def png2x(fig):
"""render figure to 2x PNG via HTML"""
import matplotlib
if not fig.axes and not fig.lines:
return
# double DPI
dpi = 2 * matplotlib.rcParams['savefig.dpi']
pngbytes = print_figure(fig, fmt='png', dpi=dpi)
x,y = pngxy(pngbytes)
x2x = x // 2
y2x = y // 2
png64 = encodestring(pngbytes).decode('ascii')
return u"<img src='data:image/png;base64,%s' width=%i height=%i/>" % (png64, x2x, y2x)
def enable_retina(ip):
"""enable retina figures"""
from matplotlib.figure import Figure
# unregister existing formatter(s):
png_formatter = ip.display_formatter.formatters['image/png']
png_formatter.type_printers.pop(Figure, None)
svg_formatter = ip.display_formatter.formatters['image/svg+xml']
svg_formatter.type_printers.pop(Figure, None)
# register png2x as HTML formatter
html_formatter = ip.display_formatter.formatters['text/html']
html_formatter.for_type(Figure, png2x)
def disable_retina(ip):
from matplotlib.figure import Figure
from IPython.core.pylabtools import select_figure_format
select_figure_format(ip, 'png')
html_formatter = ip.display_formatter.formatters['text/html']
html_formatter.type_printers.pop(Figure, None)
def load_ipython_extension(ip):
try:
enable_retina(ip)
except Exception as e:
print "Failed to load retina extension: %s" % e
def unload_ipython_extension(ip):
disable_retina(ip)
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/pylab_examples/finance_demo.py | 3 | 1146 | #!/usr/bin/env python
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, WeekdayLocator, HourLocator, \
DayLocator, MONDAY
from matplotlib.finance import quotes_historical_yahoo, candlestick,\
plot_day_summary, candlestick2
# (Year, month, day) tuples suffice as args for quotes_historical_yahoo
date1 = ( 2004, 2, 1)
date2 = ( 2004, 4, 12 )
mondays = WeekdayLocator(MONDAY) # major ticks on the mondays
alldays = DayLocator() # minor ticks on the days
weekFormatter = DateFormatter('%b %d') # e.g., Jan 12
dayFormatter = DateFormatter('%d') # e.g., 12
quotes = quotes_historical_yahoo('INTC', date1, date2)
if len(quotes) == 0:
raise SystemExit
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(weekFormatter)
#ax.xaxis.set_minor_formatter(dayFormatter)
#plot_day_summary(ax, quotes, ticksize=3)
candlestick(ax, quotes, width=0.6)
ax.xaxis_date()
ax.autoscale_view()
plt.setp( plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.show()
| apache-2.0 |
all-umass/metric-learn | metric_learn/lmnn.py | 1 | 10498 | """
Large-margin nearest neighbor metric learning. (Weinberger 2005)
LMNN learns a Mahalanobis distance metric in the kNN classification setting
using semidefinite programming.
The learned metric attempts to keep k-nearest neighbors in the same class,
while keeping examples from different classes separated by a large margin.
This algorithm makes no assumptions about the distribution of the data.
"""
#TODO: periodic recalculation of impostors, PCA initialization
from __future__ import print_function, absolute_import
import numpy as np
import warnings
from collections import Counter
from six.moves import xrange
from sklearn.metrics import euclidean_distances
from sklearn.base import TransformerMixin
from .base_metric import MahalanobisMixin
# commonality between LMNN implementations
class _base_LMNN(MahalanobisMixin, TransformerMixin):
def __init__(self, k=3, min_iter=50, max_iter=1000, learn_rate=1e-7,
regularization=0.5, convergence_tol=0.001, use_pca=True,
verbose=False, preprocessor=None):
"""Initialize the LMNN object.
Parameters
----------
k : int, optional
Number of neighbors to consider, not including self-edges.
regularization: float, optional
Weighting of pull and push terms, with 0.5 meaning equal weight.
preprocessor : array-like, shape=(n_samples, n_features) or callable
The preprocessor to call to get tuples from indices. If array-like,
tuples will be formed like this: X[indices].
"""
self.k = k
self.min_iter = min_iter
self.max_iter = max_iter
self.learn_rate = learn_rate
self.regularization = regularization
self.convergence_tol = convergence_tol
self.use_pca = use_pca
self.verbose = verbose
super(_base_LMNN, self).__init__(preprocessor)
# slower Python version
class python_LMNN(_base_LMNN):
def fit(self, X, y):
k = self.k
reg = self.regularization
learn_rate = self.learn_rate
X, y = self._prepare_inputs(X, y, dtype=float,
ensure_min_samples=2)
num_pts, num_dims = X.shape
unique_labels, label_inds = np.unique(y, return_inverse=True)
if len(label_inds) != num_pts:
raise ValueError('Must have one label per point.')
self.labels_ = np.arange(len(unique_labels))
if self.use_pca:
warnings.warn('use_pca does nothing for the python_LMNN implementation')
self.transformer_ = np.eye(num_dims)
required_k = np.bincount(label_inds).min()
if self.k > required_k:
raise ValueError('not enough class labels for specified k'
' (smallest class has %d)' % required_k)
target_neighbors = self._select_targets(X, label_inds)
impostors = self._find_impostors(target_neighbors[:, -1], X, label_inds)
if len(impostors) == 0:
# L has already been initialized to an identity matrix
return
# sum outer products
dfG = _sum_outer_products(X, target_neighbors.flatten(),
np.repeat(np.arange(X.shape[0]), k))
df = np.zeros_like(dfG)
# storage
a1 = [None]*k
a2 = [None]*k
for nn_idx in xrange(k):
a1[nn_idx] = np.array([])
a2[nn_idx] = np.array([])
# initialize L
L = self.transformer_
# first iteration: we compute variables (including objective and gradient)
# at initialization point
G, objective, total_active, df, a1, a2 = (
self._loss_grad(X, L, dfG, impostors, 1, k, reg, target_neighbors, df,
a1, a2))
# main loop
for it in xrange(2, self.max_iter):
# then at each iteration, we try to find a value of L that has better
# objective than the previous L, following the gradient:
while True:
# the next point L_next to try out is found by a gradient step
L_next = L - 2 * learn_rate * G
# we compute the objective at next point
# we copy variables that can be modified by _loss_grad, because if we
# retry we don't want to modify them several times
(G_next, objective_next, total_active_next, df_next, a1_next,
a2_next) = (
self._loss_grad(X, L_next, dfG, impostors, it, k, reg,
target_neighbors, df.copy(), list(a1), list(a2)))
assert not np.isnan(objective)
delta_obj = objective_next - objective
if delta_obj > 0:
# if we did not find a better objective, we retry with an L closer to
# the starting point, by decreasing the learning rate (making the
# gradient step smaller)
learn_rate /= 2
else:
# otherwise, if we indeed found a better obj, we get out of the loop
break
# when the better L is found (and the related variables), we set the
# old variables to these new ones before next iteration and we
# slightly increase the learning rate
L = L_next
G, df, objective, total_active, a1, a2 = (
G_next, df_next, objective_next, total_active_next, a1_next, a2_next)
learn_rate *= 1.01
if self.verbose:
print(it, objective, delta_obj, total_active, learn_rate)
# check for convergence
if it > self.min_iter and abs(delta_obj) < self.convergence_tol:
if self.verbose:
print("LMNN converged with objective", objective)
break
else:
if self.verbose:
print("LMNN didn't converge in %d steps." % self.max_iter)
# store the last L
self.transformer_ = L
self.n_iter_ = it
return self
def _loss_grad(self, X, L, dfG, impostors, it, k, reg, target_neighbors, df,
a1, a2):
# Compute pairwise distances under current metric
Lx = L.dot(X.T).T
g0 = _inplace_paired_L2(*Lx[impostors])
Ni = 1 + _inplace_paired_L2(Lx[target_neighbors], Lx[:, None, :])
g1, g2 = Ni[impostors]
# compute the gradient
total_active = 0
for nn_idx in reversed(xrange(k)):
act1 = g0 < g1[:, nn_idx]
act2 = g0 < g2[:, nn_idx]
total_active += act1.sum() + act2.sum()
if it > 1:
plus1 = act1 & ~a1[nn_idx]
minus1 = a1[nn_idx] & ~act1
plus2 = act2 & ~a2[nn_idx]
minus2 = a2[nn_idx] & ~act2
else:
plus1 = act1
plus2 = act2
minus1 = np.zeros(0, dtype=int)
minus2 = np.zeros(0, dtype=int)
targets = target_neighbors[:, nn_idx]
PLUS, pweight = _count_edges(plus1, plus2, impostors, targets)
df += _sum_outer_products(X, PLUS[:, 0], PLUS[:, 1], pweight)
MINUS, mweight = _count_edges(minus1, minus2, impostors, targets)
df -= _sum_outer_products(X, MINUS[:, 0], MINUS[:, 1], mweight)
in_imp, out_imp = impostors
df += _sum_outer_products(X, in_imp[minus1], out_imp[minus1])
df += _sum_outer_products(X, in_imp[minus2], out_imp[minus2])
df -= _sum_outer_products(X, in_imp[plus1], out_imp[plus1])
df -= _sum_outer_products(X, in_imp[plus2], out_imp[plus2])
a1[nn_idx] = act1
a2[nn_idx] = act2
# do the gradient update
assert not np.isnan(df).any()
G = dfG * reg + df * (1 - reg)
# compute the objective function
objective = total_active * (1 - reg)
objective += G.flatten().dot(L.T.dot(L).flatten())
return G, objective, total_active, df, a1, a2
def _select_targets(self, X, label_inds):
target_neighbors = np.empty((X.shape[0], self.k), dtype=int)
for label in self.labels_:
inds, = np.nonzero(label_inds == label)
dd = euclidean_distances(X[inds], squared=True)
np.fill_diagonal(dd, np.inf)
nn = np.argsort(dd)[..., :self.k]
target_neighbors[inds] = inds[nn]
return target_neighbors
def _find_impostors(self, furthest_neighbors, X, label_inds):
Lx = self.transform(X)
margin_radii = 1 + _inplace_paired_L2(Lx[furthest_neighbors], Lx)
impostors = []
for label in self.labels_[:-1]:
in_inds, = np.nonzero(label_inds == label)
out_inds, = np.nonzero(label_inds > label)
dist = euclidean_distances(Lx[out_inds], Lx[in_inds], squared=True)
i1,j1 = np.nonzero(dist < margin_radii[out_inds][:,None])
i2,j2 = np.nonzero(dist < margin_radii[in_inds])
i = np.hstack((i1,i2))
j = np.hstack((j1,j2))
if i.size > 0:
# get unique (i,j) pairs using index trickery
shape = (i.max()+1, j.max()+1)
tmp = np.ravel_multi_index((i,j), shape)
i,j = np.unravel_index(np.unique(tmp), shape)
impostors.append(np.vstack((in_inds[j], out_inds[i])))
if len(impostors) == 0:
# No impostors detected
return impostors
return np.hstack(impostors)
def _inplace_paired_L2(A, B):
'''Equivalent to ((A-B)**2).sum(axis=-1), but modifies A in place.'''
A -= B
return np.einsum('...ij,...ij->...i', A, A)
def _count_edges(act1, act2, impostors, targets):
imp = impostors[0,act1]
c = Counter(zip(imp, targets[imp]))
imp = impostors[1,act2]
c.update(zip(imp, targets[imp]))
if c:
active_pairs = np.array(list(c.keys()))
else:
active_pairs = np.empty((0,2), dtype=int)
return active_pairs, np.array(list(c.values()))
def _sum_outer_products(data, a_inds, b_inds, weights=None):
Xab = data[a_inds] - data[b_inds]
if weights is not None:
return np.dot(Xab.T, Xab * weights[:,None])
return np.dot(Xab.T, Xab)
try:
# use the fast C++ version, if available
from modshogun import LMNN as shogun_LMNN
from modshogun import RealFeatures, MulticlassLabels
class LMNN(_base_LMNN):
"""Large Margin Nearest Neighbor (LMNN)
Attributes
----------
n_iter_ : `int`
The number of iterations the solver has run.
transformer_ : `numpy.ndarray`, shape=(num_dims, n_features)
The learned linear transformation ``L``.
"""
def fit(self, X, y):
X, y = self._prepare_inputs(X, y, dtype=float,
ensure_min_samples=2)
labels = MulticlassLabels(y)
self._lmnn = shogun_LMNN(RealFeatures(X.T), labels, self.k)
self._lmnn.set_maxiter(self.max_iter)
self._lmnn.set_obj_threshold(self.convergence_tol)
self._lmnn.set_regularization(self.regularization)
self._lmnn.set_stepsize(self.learn_rate)
if self.use_pca:
self._lmnn.train()
else:
self._lmnn.train(np.eye(X.shape[1]))
self.transformer_ = self._lmnn.get_linear_transform(X)
return self
except ImportError:
LMNN = python_LMNN
| mit |
evgchz/scikit-learn | sklearn/neighbors/nearest_centroid.py | 10 | 7258 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..externals.six.moves import xrange
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Parameters
----------
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroid for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class is minimized.
If the "manhattan" metric is provided, this centroid is the median and,
for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in a csc
# format, as it is easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in y_ind:
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, "centroids_"):
raise AttributeError("Model has not been trained yet.")
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
ashhher3/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 26 | 2911 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/sparse/panel.py | 2 | 18464 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
class SparsePanelAxis(object):
def __init__(self, cache_field, frame_attr):
self.cache_field = cache_field
self.frame_attr = frame_attr
def __get__(self, obj, type=None):
return getattr(obj, self.cache_field, None)
def __set__(self, obj, value):
value = _ensure_index(value)
if isinstance(value, MultiIndex):
raise NotImplementedError
for v in compat.itervalues(obj._frames):
setattr(v, self.frame_attr, value)
setattr(obj, self.cache_field, value)
class SparsePanel(Panel):
"""
Sparse version of Panel
Parameters
----------
frames : dict of DataFrame objects
items : array-like
major_axis : array-like
minor_axis : array-like
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
Notes
-----
"""
ndim = 3
_typ = 'panel'
_subtyp = 'sparse_panel'
def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None,
default_fill_value=np.nan, default_kind='block',
copy=False):
if frames is None:
frames = {}
if isinstance(frames, np.ndarray):
new_frames = {}
for item, vals in zip(items, frames):
new_frames[item] = \
SparseDataFrame(vals, index=major_axis,
columns=minor_axis,
default_fill_value=default_fill_value,
default_kind=default_kind)
frames = new_frames
if not isinstance(frames, dict):
raise TypeError('input must be a dict, a %r was passed' %
type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
# pre-filter, if necessary
if items is None:
items = Index(sorted(frames.keys()))
items = _ensure_index(items)
(clean_frames,
major_axis,
minor_axis) = _convert_frames(frames, major_axis,
minor_axis, kind=kind,
fill_value=fill_value)
self._frames = clean_frames
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
self.minor_axis = minor_axis
def _consolidate_inplace(self): # pragma: no cover
# do nothing when DataFrame calls this method
pass
def __array_wrap__(self, result):
return SparsePanel(result, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
@classmethod
def from_dict(cls, data):
"""
Analogous to Panel.from_dict
"""
return SparsePanel(data)
def to_dense(self):
"""
Convert SparsePanel to (dense) Panel
Returns
-------
dense : Panel
"""
return Panel(self.values, self.items, self.major_axis,
self.minor_axis)
def as_matrix(self):
return self.values
@property
def values(self):
# return dense values
return np.array([self._frames[item].values
for item in self.items])
# need a special property for items to make the field assignable
_items = None
def _get_items(self):
return self._items
def _set_items(self, new_items):
new_items = _ensure_index(new_items)
if isinstance(new_items, MultiIndex):
raise NotImplementedError
# need to create new frames dict
old_frame_dict = self._frames
old_items = self._items
self._frames = dict((new_k, old_frame_dict[old_k])
for new_k, old_k in zip(new_items, old_items))
self._items = new_items
items = property(fget=_get_items, fset=_set_items)
# DataFrame's index
major_axis = SparsePanelAxis('_major_axis', 'index')
# DataFrame's columns / "items"
minor_axis = SparsePanelAxis('_minor_axis', 'columns')
def _ixs(self, i, axis=0):
"""
for compat as we don't support Block Manager here
i : int, slice, or sequence of integers
axis : int
"""
key = self._get_axis(axis)[i]
# xs cannot handle a non-scalar key, so just reindex here
if com.is_list_like(key):
return self.reindex(**{self._get_axis_name(axis): key})
return self.xs(key, axis=axis)
def _slice(self, slobj, axis=0, kind=None):
"""
for compat as we don't support Block Manager here
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
return self.reindex(**{axis: index[slobj]})
def _get_item_cache(self, key):
return self._frames[key]
def __setitem__(self, key, value):
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis,
columns=self.minor_axis)
if not isinstance(value, SparseDataFrame):
value = value.to_sparse(fill_value=self.default_fill_value,
kind=self.default_kind)
else:
raise ValueError('only DataFrame objects can be set currently')
self._frames[key] = value
if key not in self.items:
self._items = Index(list(self.items) + [key])
def set_value(self, item, major, minor, value):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Panel
Returns
-------
panel : SparsePanel
"""
dense = self.to_dense().set_value(item, major, minor, value)
return dense.to_sparse(kind=self.default_kind,
fill_value=self.default_fill_value)
def __delitem__(self, key):
loc = self.items.get_loc(key)
indices = lrange(loc) + lrange(loc + 1, len(self.items))
del self._frames[key]
self._items = self._items.take(indices)
def __getstate__(self):
# pickling
return (self._frames, com._pickle_array(self.items),
com._pickle_array(self.major_axis),
com._pickle_array(self.minor_axis),
self.default_fill_value, self.default_kind)
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(com._unpickle_array(items))
self._major_axis = _ensure_index(com._unpickle_array(major))
self._minor_axis = _ensure_index(com._unpickle_array(minor))
self._frames = frames
def copy(self, deep=True):
"""
Make a copy of the sparse panel
Returns
-------
copy : SparsePanel
"""
d = self._construct_axes_dict()
if deep:
new_data = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(self._frames))
d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d))
else:
new_data = self._frames.copy()
d['default_fill_value']=self.default_fill_value
d['default_kind']=self.default_kind
return SparsePanel(new_data, **d)
def to_frame(self, filter_observations=True):
"""
Convert SparsePanel to (dense) DataFrame
Returns
-------
frame : DataFrame
"""
if not filter_observations:
raise TypeError('filter_observations=False not supported for '
'SparsePanel.to_long')
I, N, K = self.shape
counts = np.zeros(N * K, dtype=int)
d_values = {}
d_indexer = {}
for item in self.items:
frame = self[item]
values, major, minor = _stack_sparse_info(frame)
# values are stacked column-major
indexer = minor * N + major
counts.put(indexer, counts.take(indexer) + 1) # cuteness
d_values[item] = values
d_indexer[item] = indexer
# have full set of observations for each item
mask = counts == I
# for each item, take mask values at index locations for those sparse
# values, and use that to select values
values = np.column_stack([d_values[item][mask.take(d_indexer[item])]
for item in self.items])
inds, = mask.nonzero()
# still column major
major_labels = inds % N
minor_labels = inds // N
index = MultiIndex(levels=[self.major_axis, self.minor_axis],
labels=[major_labels, minor_labels],
verify_integrity=False)
df = DataFrame(values, index=index, columns=self.items)
return df.sortlevel(level=0)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def reindex(self, major=None, items=None, minor=None, major_axis=None,
minor_axis=None, copy=False):
"""
Conform / reshape panel axis labels to new input labels
Parameters
----------
major : array-like, default None
items : array-like, default None
minor : array-like, default None
copy : boolean, default False
Copy underlying SparseDataFrame objects
Returns
-------
reindexed : SparsePanel
"""
major = com._mut_exclusive(major=major, major_axis=major_axis)
minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis)
if com._all_none(items, major, minor):
raise ValueError('Must specify at least one axis')
major = self.major_axis if major is None else major
minor = self.minor_axis if minor is None else minor
if items is not None:
new_frames = {}
for item in items:
if item in self._frames:
new_frames[item] = self._frames[item]
else:
raise NotImplementedError('Reindexing with new items not yet '
'supported')
else:
new_frames = self._frames
if copy:
new_frames = dict((k, v.copy()) for k, v in compat.iteritems(new_frames))
return SparsePanel(new_frames, items=items,
major_axis=major,
minor_axis=minor,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combine(self, other, func, axis=0):
if isinstance(other, DataFrame):
return self._combineFrame(other, func, axis=axis)
elif isinstance(other, Panel):
return self._combinePanel(other, func)
elif np.isscalar(other):
new_frames = dict((k, func(v, other))
for k, v in compat.iteritems(self))
return self._new_like(new_frames)
def _combineFrame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
# TODO: make faster!
new_frames = {}
for item, item_slice in zip(self.items, new_values):
old_frame = self[item]
ofv = old_frame.default_fill_value
ok = old_frame.default_kind
new_frames[item] = SparseDataFrame(item_slice,
index=self.major_axis,
columns=self.minor_axis,
default_fill_value=ofv,
default_kind=ok)
return self._new_like(new_frames)
def _new_like(self, new_frames):
return SparsePanel(new_frames, self.items, self.major_axis,
self.minor_axis,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combinePanel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
new_frames = {}
for item in items:
new_frames[item] = func(this[item], other[item])
if not isinstance(other, SparsePanel):
new_default_fill = self.default_fill_value
else:
# maybe unnecessary
new_default_fill = func(self.default_fill_value,
other.default_fill_value)
return SparsePanel(new_frames, items, major, minor,
default_fill_value=new_default_fill,
default_kind=self.default_kind)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
"""
slices = dict((k, v.xs(key)) for k, v in compat.iteritems(self))
return DataFrame(slices, index=self.minor_axis, columns=self.items)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : SparseDataFrame
index -> major axis, columns -> items
"""
slices = dict((k, v[key]) for k, v in compat.iteritems(self))
return SparseDataFrame(slices, index=self.major_axis,
columns=self.items,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
# TODO: allow SparsePanel to work with flex arithmetic.
# pow and mod only work for scalars for now
def pow(self, val, *args, **kwargs):
"""wrapper around `__pow__` (only works for scalar values)"""
return self.__pow__(val)
def mod(self, val, *args, **kwargs):
"""wrapper around `__mod__` (only works for scalar values"""
return self.__mod__(val)
# Sparse objects opt out of numexpr
SparsePanel._add_aggregate_operations(use_numexpr=False)
ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False, **ops.panel_special_funcs)
SparseWidePanel = SparsePanel
def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
from pandas.core.panel import _get_combined_index
output = {}
for item, df in compat.iteritems(frames):
if not isinstance(df, SparseDataFrame):
df = SparseDataFrame(df, default_kind=kind,
default_fill_value=fill_value)
output[item] = df
if index is None:
all_indexes = [df.index for df in output.values()]
index = _get_combined_index(all_indexes)
if columns is None:
all_columns = [df.columns for df in output.values()]
columns = _get_combined_index(all_columns)
index = _ensure_index(index)
columns = _ensure_index(columns)
for item, df in compat.iteritems(output):
if not (df.index.equals(index) and df.columns.equals(columns)):
output[item] = df.reindex(index=index, columns=columns)
return output, index, columns
def _stack_sparse_info(frame):
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
for col in frame.columns:
series = frame[col]
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
sparse_values = np.concatenate(vals_to_concat)
return sparse_values, major_labels, minor_labels
| gpl-2.0 |
Eomys/MoSQITo | mosqito/tests/loudness/test_loudness_zwicker_stationary.py | 1 | 6111 | # -*- coding: utf-8 -*-
"""
@date Created on Mon Mar 23 2020
@author martin_g for Eomys
"""
# Third party imports
import numpy as np
import matplotlib.pyplot as plt
import pytest
# Local application imports
from mosqito.functions.loudness_zwicker.loudness_zwicker_stationary import (
loudness_zwicker_stationary,
)
from mosqito.functions.shared.load import load2oct3
@pytest.mark.loudness_zwst # to skip or run only loudness zwicker stationary tests
def test_loudness_zwicker_3oct():
"""Test function for the script loudness_zwicker_stationary
Test function for the script loudness_zwicker_stationary with
third octave band spectrum as input. The input spectrum is
provided by ISO 532-1 annex B2, the compliance is assessed
according to section 5.1 of the standard. One .png compliance
plot is generated.
Parameters
----------
None
Outputs
-------
None
"""
# Third octave levels as input for stationary loudness
# (from ISO 532-1 annex B2)
test_signal_1 = np.array(
[
-60,
-60,
78,
79,
89,
72,
80,
89,
75,
87,
85,
79,
86,
80,
71,
70,
72,
71,
72,
74,
69,
65,
67,
77,
68,
58,
45,
30.0,
]
)
signal = {
"data_file": "Test signal 1.txt",
"N": 83.296,
"N_specif_file": "mosqito/tests/loudness/data/ISO_532-1/test_signal_1.csv",
}
N, N_specific = loudness_zwicker_stationary(test_signal_1)
tst = check_compliance(N, N_specific, signal)
assert tst
@pytest.mark.loudness_zwst # to skip or run only loudness zwicker stationary tests
def test_loudness_zwicker_wav():
"""Test function for the script loudness_zwicker_stationary
Test function for the script loudness_zwicker_stationary with
.wav file as input. The input file is provided by ISO 532-1 annex
B3, the compliance is assessed according to section 5.1 of the
standard. One .png compliance plot is generated.
Parameters
----------
None
Outputs
-------
None
"""
# Test signal as input for stationary loudness
# (from ISO 532-1 annex B3)
signal = {
"data_file": "mosqito/tests/loudness/data/ISO_532-1/Test signal 3 (1 kHz 60 dB).wav",
"N": 4.019,
"N_specif_file": "mosqito/tests/loudness/data/ISO_532-1/test_signal_3.csv",
}
# Load signal and compute third octave band spectrum
third_spec = load2oct3(True, signal["data_file"], calib=2 * 2 ** 0.5)
# Compute Loudness
N, N_specific = loudness_zwicker_stationary(third_spec["values"])
# Check ISO 532-1 compliance
assert check_compliance(N, N_specific, signal)
def check_compliance(N, N_specific, iso_ref):
Check the compliance of loudness calc. to ISO 532-1
Check the compliance of the input data N and N_specific
to section 5.1 of ISO 532-1 by using the reference data
described in dictionary iso_ref.
Parameters
----------
N : float
Calculated loudness [sones]
N_specific : numpy.ndarray
Specific loudness [sones/bark]
iso_ref : dict
{
"data_file": <Path to reference input signal>,
"N": <Reference loudness value>,
"N_specif_file": <Path to reference calculated specific loudness>
}
Dictionary containing link to ref. data
Outputs
-------
tst : bool
Compliance to the reference data
"""
# Load ISO reference outputs
N_iso = iso_ref["N"]
N_specif_iso = np.genfromtxt(iso_ref["N_specif_file"], skip_header=1)
# Test for ISO 532-1 conformance (section 5.1)
tst_N = (
N >= N_iso * 0.95
and N <= N_iso * 1.05
and N >= N_iso - 0.1
and N <= N_iso + 0.1
)
tst_specif = (
N_specific >= np.amin([N_specif_iso * 0.95, N_specif_iso - 0.1], axis=0)
).all() and (
N_specific <= np.amax([N_specif_iso * 1.05, N_specif_iso + 0.1], axis=0)
).all()
tst = tst_N and tst_specif
# Define and plot the tolerance curves
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
tol_curve_min = np.amin([N_specif_iso * 0.95, N_specif_iso - 0.1], axis=0)
tol_curve_min[tol_curve_min < 0] = 0
tol_curve_max = np.amax([N_specif_iso * 1.05, N_specif_iso + 0.1], axis=0)
plt.plot(
bark_axis,
tol_curve_min,
color="red",
linestyle="solid",
label="5% tolerance",
linewidth=1,
)
plt.plot(
bark_axis, tol_curve_max, color="red", linestyle="solid", label="", linewidth=1
)
plt.legend()
# Compliance plot
plt.plot(bark_axis, N_specific, label="MOSQITO")
if tst_specif:
plt.text(
0.5,
0.5,
"Test passed (5% tolerance not exceeded)",
horizontalalignment="center",
verticalalignment="center",
transform=plt.gca().transAxes,
bbox=dict(facecolor="green", alpha=0.3),
)
else:
tst = 0
plt.text(
0.5,
0.5,
"Test not passed",
horizontalalignment="center",
verticalalignment="center",
transform=plt.gca().transAxes,
bbox=dict(facecolor="red", alpha=0.3),
)
if tst_N:
clr = "green"
else:
clr = "red"
plt.title("N = " + str(N) + " sone (ISO ref. " + str(N_iso) + " sone)", color=clr)
file_name = "_".join(iso_ref["data_file"].split(" "))
plt.savefig(
"mosqito/tests/loudness/output/test_loudness_zwicker_wav_"
+ file_name.split("/")[-1][:-4]
+ ".png",
format="png",
)
plt.clf()
return tst
# test de la fonction
if __name__ == "__main__":
test_loudness_zwicker_3oct()
| apache-2.0 |
maryklayne/Funcao | sympy/plotting/tests/test_plot_implicit.py | 17 | 2600 | import warnings
from sympy import (plot_implicit, cos, Symbol, Eq, sin, re, And, Or, exp, I,
tan, pi)
from sympy.plotting.plot import unset_show
from tempfile import NamedTemporaryFile
from sympy.utilities.pytest import skip
from sympy.external import import_module
#Set plots not to show
unset_show()
def tmp_file(name=''):
return NamedTemporaryFile(suffix='.png').name
def plot_and_save(name):
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
#implicit plot tests
plot_implicit(Eq(y, cos(x)), (x, -5, 5), (y, -2, 2)).save(tmp_file(name))
plot_implicit(Eq(y**2, x**3 - x), (x, -5, 5),
(y, -4, 4)).save(tmp_file(name))
plot_implicit(y > 1 / x, (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y < 1 / tan(x), (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y >= 2 * sin(x) * cos(x), (x, -5, 5),
(y, -2, 2)).save(tmp_file(name))
plot_implicit(y <= x**2, (x, -3, 3),
(y, -1, 5)).save(tmp_file(name))
#Test all input args for plot_implicit
plot_implicit(Eq(y**2, x**3 - x)).save(tmp_file())
plot_implicit(Eq(y**2, x**3 - x), adaptive=False).save(tmp_file())
plot_implicit(Eq(y**2, x**3 - x), adaptive=False, points=500).save(tmp_file())
plot_implicit(y > x, (x, -5, 5)).save(tmp_file())
plot_implicit(And(y > exp(x), y > x + 2)).save(tmp_file())
plot_implicit(Or(y > x, y > -x)).save(tmp_file())
plot_implicit(x**2 - 1, (x, -5, 5)).save(tmp_file())
plot_implicit(x**2 - 1).save(tmp_file())
plot_implicit(y > x, depth=-5).save(tmp_file())
plot_implicit(y > x, depth=5).save(tmp_file())
plot_implicit(y > cos(x), adaptive=False).save(tmp_file())
plot_implicit(y < cos(x), adaptive=False).save(tmp_file())
plot_implicit(And(y > cos(x), Or(y > x, Eq(y, x)))).save(tmp_file())
plot_implicit(y - cos(pi / x)).save(tmp_file())
#Test plots which cannot be rendered using the adaptive algorithm
#TODO: catch the warning.
plot_implicit(Eq(y, re(cos(x) + I*sin(x)))).save(tmp_file(name))
with warnings.catch_warnings(record=True) as w:
plot_implicit(x**2 - 1, legend='An implicit plot').save(tmp_file())
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert 'No labeled objects found' in str(w[0].message)
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
plot_and_save('test')
else:
skip("Matplotlib not the default backend")
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
(because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
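# Illustrative behaviour (not part of the original module):
#   _is_integral_float(np.array([1.0, 0.0]))  -> True  (float dtype, integral values)
#   _is_integral_float(np.array([0.5, 1.0]))  -> False (0.5 is not integral)
#   _is_integral_float(np.array([1, 0]))      -> False (integer dtype, kind 'i')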
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
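# Illustrative sketch (not part of scikit-learn): how an estimator's
# ``partial_fit`` would typically use the helper above. ``clf.class_count_``
# is a hypothetical attribute used only for demonstration.
def _example_partial_fit(clf, X, y, classes=None):
    if _check_partial_fit_first_call(clf, classes):
        # First call: ``clf.classes_`` has just been set, so per-class
        # state can be allocated here (one counter per class in this sketch).
        clf.class_count_ = np.zeros(len(clf.classes_), dtype=int)
    # Later calls reuse ``clf.classes_`` and only update the existing state.
    clf.class_count_ += bincount(np.searchsorted(clf.classes_, y),
                                 minlength=len(clf.classes_))
    return clf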
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
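# Illustrative sketch (not part of scikit-learn): calling class_distribution
# on a small dense multi-output target. The values below are made up for
# demonstration; the __main__ guard keeps this from running on import.
if __name__ == "__main__":
    _y_demo = np.array([[1, 0],
                        [2, 0],
                        [1, 3]])
    _classes, _n_classes, _priors = class_distribution(_y_demo)
    # _classes   -> [array([1, 2]), array([0, 3])]
    # _n_classes -> [2, 2]
    # _priors    -> [array([0.667, 0.333]), array([0.667, 0.333])] (approximately)
    print((_classes, _n_classes, _priors))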
| bsd-3-clause |
aolsux/SamuROI | samuroi/util/branchmaskcreator.py | 1 | 4856 | from matplotlib.patches import Polygon, Circle
from .maskcreator import MaskCreator
from ..masks.branch import BranchMask
from ..masks.circle import CircleMask
from ..util.branch import Branch
class BranchMaskCreator(MaskCreator):
default_radius = 5.
@MaskCreator.enabled.setter
def enabled(self, e):
"""Extend the active setter of MaskCreator to also remove any artists if deactivated"""
# call base class property setter
MaskCreator.enabled.fset(self, e)
# handle own derived stuff
if self.artist is not None:
self.artist.remove()
self.artist = None
self.x, self.y, self.r = [], [], []
self.update()
def __init__(self, axes, canvas, update, notify, enabled=False):
"""
Arguments:
axes, the axes where the interactive creation takes place
            canvas, the figure canvas, required to connect to signals
update, a callable which will be called after adding a pixel to the current mask.
notify, a callable that will get evoked with the coordinates of all pixels of a finished mask.
            enabled, should mask creation be enabled from the beginning (default False)
"""
self.artist = None
# container for x,y and radius values
self.x, self.y, self.r = [], [], []
super(BranchMaskCreator, self).__init__(axes=axes,
canvas=canvas,
update=update,
notify=notify,
enabled=enabled)
def onclick(self, event):
self.x.append(event.xdata)
self.y.append(event.ydata)
# reuse last radius for consecutive segments
if len(self.r) > 0:
self.r.append(self.r[-1])
else:
self.r.append(BranchMaskCreator.default_radius)
self.__update_artist()
self.update()
def __update_artist(self):
# check if this is the first point of a branch
if self.artist is None:
self.artist = Circle([self.x[0], self.y[0]], radius=self.r[0], fill=False,
lw=2, color='red')
self.axes.add_artist(self.artist)
elif len(self.x) == 0:
self.artist.remove()
self.artist = None
elif len(self.x) == 1:
self.artist.remove()
self.artist = Circle([self.x[0], self.y[0]], radius=self.r[0], fill=False,
lw=2, color='red')
self.axes.add_artist(self.artist)
# change from circle to polygon if more than 1 points are available
elif len(self.x) == 2:
self.artist.remove()
branch = Branch(x=self.x, y=self.y, z=[0 for i in self.x], r=self.r)
self.artist = Polygon(branch.outline, fill=False, color='red', lw=2)
self.axes.add_artist(self.artist)
else:
assert (len(self.x) > 2)
branch = Branch(x=self.x, y=self.y, z=[0 for i in self.x], r=self.r)
self.artist.set_xy(branch.outline)
def onkey(self, event):
if self.artist is not None:
if event.key == '+':
self.r[-1] = self.r[-1] + 1
self.__update_artist()
self.update()
elif event.key == '-':
self.r[-1] = self.r[-1] - 1
self.__update_artist()
self.update()
elif event.key == 'z':
                # debug output for the 'z' (undo last segment) keypress
                print(event)
                print(dir(event))
self.r = self.r[:-1]
self.x = self.x[:-1]
self.y = self.y[:-1]
self.__update_artist()
self.update()
elif event.key == 'enter':
self.artist.remove()
self.update()
self.artist = None
if len(self.x) == 1:
# shift by 0.5 to compensate for pixel offset in imshow
mask = CircleMask(center=[self.x[0] + 0.5, self.y[0] + 0.5], radius=self.r[0])
else:
import numpy
dtype = [('x', float), ('y', float), ('z', float), ('radius', float)]
x = numpy.array(self.x) + 0.5
y = numpy.array(self.y) + 0.5
z = [0 for i in self.x]
r = self.r
data = numpy.rec.fromarrays([x, y, z, r], dtype=dtype)
# shift by 0.5 to compensate for pixel offset in imshow
mask = BranchMask(data=data)
self.x, self.y, self.r = [], [], []
self.notify(mask)
self.enabled = False
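# Illustrative sketch (not part of samuroi): wiring the creator to a plain
# matplotlib figure outside the SamuROI GUI. The ``on_mask_finished``
# callback is a hypothetical stand-in for whatever the application does
# with a finished CircleMask or BranchMask.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    figure, axes = plt.subplots()

    def on_mask_finished(mask):
        # called with the finished mask once 'enter' is pressed
        print(mask)

    creator = BranchMaskCreator(axes=axes,
                                canvas=figure.canvas,
                                update=figure.canvas.draw_idle,
                                notify=on_mask_finished,
                                enabled=True)
    plt.show()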
| mit |
bigdig/vnpy | vnpy/gateway/tiger/tiger_gateway.py | 2 | 19325 | """
Author: KeKe
Please install tiger-api before use.
pip install tigeropen
"""
from copy import copy
from datetime import datetime
from multiprocessing.dummy import Pool
from queue import Empty, Queue
import functools
import traceback
import pytz
import pandas as pd
from pandas import DataFrame
from tigeropen.tiger_open_config import TigerOpenClientConfig
from tigeropen.common.consts import Language, Currency, Market
from tigeropen.quote.quote_client import QuoteClient
from tigeropen.trade.trade_client import TradeClient
from tigeropen.trade.domain.order import OrderStatus
from tigeropen.push.push_client import PushClient
from tigeropen.common.exceptions import ApiException
from vnpy.trader.constant import Direction, Product, Status, OrderType, Exchange
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
SubscribeRequest,
OrderRequest,
CancelRequest,
)
# NOTE: Product.WARRANT and Product.OPTION each appear twice in this literal;
# Python keeps the last value, so WARRANT maps to "IOPT" and OPTION to "FOP".
PRODUCT_VT2TIGER = {
Product.EQUITY: "STK",
Product.OPTION: "OPT",
Product.WARRANT: "WAR",
Product.WARRANT: "IOPT",
Product.FUTURES: "FUT",
Product.OPTION: "FOP",
Product.FOREX: "CASH",
}
DIRECTION_VT2TIGER = {
Direction.LONG: "BUY",
Direction.SHORT: "SELL",
}
DIRECTION_TIGER2VT = {
"BUY": Direction.LONG,
"SELL": Direction.SHORT,
"sell": Direction.SHORT,
}
ORDERTYPE_VT2TIGER = {
OrderType.LIMIT: "LMT",
OrderType.MARKET: "MKT",
}
STATUS_TIGER2VT = {
OrderStatus.PENDING_NEW: Status.SUBMITTING,
OrderStatus.NEW: Status.SUBMITTING,
OrderStatus.HELD: Status.SUBMITTING,
OrderStatus.PARTIALLY_FILLED: Status.PARTTRADED,
OrderStatus.FILLED: Status.ALLTRADED,
OrderStatus.CANCELLED: Status.CANCELLED,
OrderStatus.PENDING_CANCEL: Status.CANCELLED,
OrderStatus.REJECTED: Status.REJECTED,
OrderStatus.EXPIRED: Status.NOTTRADED
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class TigerGateway(BaseGateway):
""""""
default_setting = {
"tiger_id": "",
"account": "",
"服务器": ["标准", "环球", "仿真"],
"private_key": "",
}
exchanges = [
Exchange.SEHK,
Exchange.SMART,
Exchange.SSE,
Exchange.SZSE
]
def __init__(self, event_engine):
"""Constructor"""
super(TigerGateway, self).__init__(event_engine, "TIGER")
self.tiger_id = ""
self.account = ""
self.server = ""
self.language = ""
self.client_config = None
self.quote_client = None
self.push_client = None
self.local_id = 1000000
self.tradeid = 0
self.active = False
self.queue = Queue()
self.pool = None
self.ID_TIGER2VT = {}
self.ID_VT2TIGER = {}
self.ticks = {}
self.trades = set()
self.contracts = {}
self.symbol_names = {}
self.push_connected = False
self.subscribed_symbols = set()
def run(self):
""""""
while self.active:
try:
func, args = self.queue.get(timeout=0.1)
func(*args)
except Empty:
pass
def add_task(self, func, *args):
""""""
self.queue.put((func, [*args]))
def connect(self, setting: dict):
""""""
self.private_key = setting["private_key"]
self.tiger_id = setting["tiger_id"]
self.server = setting["服务器"]
self.account = setting["account"]
        self.language = Language.zh_CN
# Start thread pool for REST call
self.active = True
self.pool = Pool(5)
self.pool.apply_async(self.run)
        # Put connect tasks into the queue.
self.init_client_config()
self.add_task(self.connect_quote)
self.add_task(self.connect_trade)
self.add_task(self.connect_push)
def init_client_config(self, sandbox=False):
""""""
self.client_config = TigerOpenClientConfig(sandbox_debug=sandbox)
self.client_config.private_key = self.private_key
self.client_config.tiger_id = self.tiger_id
self.client_config.account = self.account
self.client_config.language = self.language
def connect_quote(self):
"""
Connect to market data server.
"""
try:
self.quote_client = QuoteClient(self.client_config)
self.symbol_names = dict(
self.quote_client.get_symbol_names(lang=Language.zh_CN))
self.query_contract()
except ApiException:
self.write_log("查询合约失败")
return
self.write_log("行情接口连接成功")
self.write_log("合约查询成功")
def connect_trade(self):
"""
Connect to trade server.
"""
self.trade_client = TradeClient(self.client_config)
try:
self.add_task(self.query_order)
self.add_task(self.query_position)
self.add_task(self.query_account)
except ApiException:
self.write_log("交易接口连接失败")
return
self.write_log("交易接口连接成功")
def connect_push(self):
"""
Connect to push server.
"""
protocol, host, port = self.client_config.socket_host_port
self.push_client = PushClient(host, port, (protocol == "ssl"))
self.push_client.quote_changed = self.on_quote_change
self.push_client.asset_changed = self.on_asset_change
self.push_client.position_changed = self.on_position_change
self.push_client.order_changed = self.on_order_change
self.push_client.connect_callback = self.on_push_connected
self.push_client.connect(
self.client_config.tiger_id, self.client_config.private_key)
def subscribe(self, req: SubscribeRequest):
""""""
self.subscribed_symbols.add(req.symbol)
if self.push_connected:
self.push_client.subscribe_quote([req.symbol])
def on_push_connected(self):
""""""
self.push_connected = True
self.write_log("推送接口连接成功")
self.push_client.subscribe_asset()
self.push_client.subscribe_position()
self.push_client.subscribe_order()
self.push_client.subscribe_quote(list(self.subscribed_symbols))
def on_quote_change(self, tiger_symbol: str, data: list, trading: bool):
""""""
data = dict(data)
symbol, exchange = convert_symbol_tiger2vt(tiger_symbol)
tick = self.ticks.get(symbol, None)
if not tick:
tick = TickData(
symbol=symbol,
exchange=exchange,
gateway_name=self.gateway_name,
datetime=datetime.now(CHINA_TZ),
name=self.symbol_names[symbol],
)
self.ticks[symbol] = tick
tick.datetime = datetime.fromtimestamp(int(data["timestamp"]) / 1000)
tick.pre_close = data.get("prev_close", tick.pre_close)
tick.last_price = data.get("latest_price", tick.last_price)
tick.volume = data.get("volume", tick.volume)
tick.open_price = data.get("open", tick.open_price)
tick.high_price = data.get("high", tick.high_price)
tick.low_price = data.get("low", tick.low_price)
tick.ask_price_1 = data.get("ask_price", tick.ask_price_1)
tick.bid_price_1 = data.get("bid_price", tick.bid_price_1)
tick.ask_volume_1 = data.get("ask_size", tick.ask_volume_1)
tick.bid_volume_1 = data.get("bid_size", tick.bid_volume_1)
self.on_tick(copy(tick))
def on_asset_change(self, tiger_account: str, data: list):
""""""
data = dict(data)
if "net_liquidation" not in data:
return
account = AccountData(
accountid=tiger_account,
balance=data["net_liquidation"],
frozen=0.0,
gateway_name=self.gateway_name,
)
self.on_account(account)
def on_position_change(self, tiger_account: str, data: list):
""""""
data = dict(data)
symbol, exchange = convert_symbol_tiger2vt(data["origin_symbol"])
pos = PositionData(
symbol=symbol,
exchange=exchange,
direction=Direction.NET,
volume=int(data["quantity"]),
frozen=0.0,
price=data["average_cost"],
pnl=data["unrealized_pnl"],
gateway_name=self.gateway_name,
)
self.on_position(pos)
def on_order_change(self, tiger_account: str, data: list):
""""""
data = dict(data)
symbol, exchange = convert_symbol_tiger2vt(data["origin_symbol"])
status = STATUS_TIGER2VT[data["status"]]
dt = datetime.fromtimestamp(data["order_time"] / 1000)
dt = CHINA_TZ.localize(dt)
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=self.ID_TIGER2VT.get(
str(data["order_id"]), self.get_new_local_id()),
direction=Direction.NET,
price=data.get("limit_price", 0),
volume=data["quantity"],
traded=data["filled"],
status=status,
datetime=dt,
gateway_name=self.gateway_name,
)
self.ID_TIGER2VT[str(data["order_id"])] = order.orderid
self.on_order(order)
if status == Status.ALLTRADED:
dt = datetime.fromtimestamp(data["trade_time"] / 1000)
dt = CHINA_TZ.localize(dt)
self.tradeid += 1
trade = TradeData(
symbol=symbol,
exchange=exchange,
direction=Direction.NET,
tradeid=self.tradeid,
orderid=self.ID_TIGER2VT[str(data["order_id"])],
price=data["avg_fill_price"],
volume=data["filled"],
datetime=dt,
gateway_name=self.gateway_name,
)
self.on_trade(trade)
def get_new_local_id(self):
self.local_id += 1
return self.local_id
def send_order(self, req: OrderRequest):
""""""
local_id = self.get_new_local_id()
order = req.create_order_data(local_id, self.gateway_name)
self.on_order(order)
self.add_task(self._send_order, req, local_id)
return order.vt_orderid
def _send_order(self, req: OrderRequest, local_id):
""""""
currency = config_symbol_currency(req.symbol)
try:
contract = self.trade_client.get_contracts(
symbol=req.symbol, currency=currency)[0]
order = self.trade_client.create_order(
account=self.account,
contract=contract,
action=DIRECTION_VT2TIGER[req.direction],
order_type=ORDERTYPE_VT2TIGER[req.type],
quantity=int(req.volume),
limit_price=req.price,
)
self.ID_TIGER2VT[str(order.order_id)] = local_id
self.ID_VT2TIGER[local_id] = str(order.order_id)
self.trade_client.place_order(order)
except: # noqa
traceback.print_exc()
self.write_log("发单失败")
return
def cancel_order(self, req: CancelRequest):
""""""
self.add_task(self._cancel_order, req)
def _cancel_order(self, req: CancelRequest):
""""""
try:
order_id = self.ID_VT2TIGER[req.orderid]
data = self.trade_client.cancel_order(order_id=order_id)
except ApiException:
self.write_log(f"撤单失败:{req.orderid}")
if not data:
self.write_log("撤单成功")
def query_contract(self):
""""""
# HK Stock
symbols_names_HK = self.quote_client.get_symbol_names(
lang=Language.zh_CN, market=Market.HK)
contract_names_HK = DataFrame(
symbols_names_HK, columns=["symbol", "name"])
contractList = list(contract_names_HK["symbol"])
i, n = 0, len(contractList)
result = pd.DataFrame()
while i < n:
i += 50
c = contractList[i - 50:i]
r = self.quote_client.get_trade_metas(c)
result = result.append(r)
contract_detail_HK = result.sort_values(by="symbol", ascending=True)
contract_HK = pd.merge(
contract_names_HK, contract_detail_HK, how="left", on="symbol")
for ix, row in contract_HK.iterrows():
contract = ContractData(
symbol=row["symbol"],
exchange=Exchange.SEHK,
name=row["name"],
product=Product.EQUITY,
size=1,
min_volume=row["lot_size"],
pricetick=row["min_tick"],
net_position=True,
gateway_name=self.gateway_name,
)
self.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
# US Stock
symbols_names_US = self.quote_client.get_symbol_names(
lang=Language.zh_CN, market=Market.US)
contract_US = DataFrame(symbols_names_US, columns=["symbol", "name"])
for ix, row in contract_US.iterrows():
contract = ContractData(
symbol=row["symbol"],
exchange=Exchange.SMART,
name=row["name"],
product=Product.EQUITY,
size=1,
min_volume=100,
pricetick=0.001,
gateway_name=self.gateway_name,
)
self.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
# CN Stock
symbols_names_CN = self.quote_client.get_symbol_names(
lang=Language.zh_CN, market=Market.CN)
contract_CN = DataFrame(symbols_names_CN, columns=["symbol", "name"])
for ix, row in contract_CN.iterrows():
symbol = row["symbol"]
symbol, exchange = convert_symbol_tiger2vt(symbol)
contract = ContractData(
symbol=symbol,
exchange=exchange,
name=row["name"],
product=Product.EQUITY,
size=1,
min_volume=100,
pricetick=0.001,
gateway_name=self.gateway_name,
)
self.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
def query_account(self):
""""""
try:
assets = self.trade_client.get_assets()
except ApiException:
self.write_log("查询资金失败")
return
for i in assets:
account = AccountData(
accountid=self.account,
balance=i.summary.net_liquidation,
frozen=0.0,
gateway_name=self.gateway_name,
)
self.on_account(account)
def query_position(self):
""""""
try:
position = self.trade_client.get_positions()
except ApiException:
self.write_log("查询持仓失败")
return
for i in position:
symbol, exchange = convert_symbol_tiger2vt(i.contract.symbol)
pos = PositionData(
symbol=symbol,
exchange=exchange,
direction=Direction.NET,
volume=int(i.quantity),
frozen=0.0,
price=i.average_cost,
pnl=float(i.unrealized_pnl),
gateway_name=self.gateway_name,
)
self.on_position(pos)
def query_order(self):
""""""
try:
data = self.trade_client.get_orders()
data = sorted(data, key=lambda x: x.order_time, reverse=False)
except: # noqa
traceback.print_exc()
self.write_log("查询委托失败")
return
self.process_order(data)
self.process_deal(data)
def close(self):
""""""
self.active = False
if self.push_client:
self.push_client.disconnect()
def process_order(self, data):
""""""
for i in data:
symbol, exchange = convert_symbol_tiger2vt(str(i.contract))
local_id = self.get_new_local_id()
dt = datetime.fromtimestamp(i.order_time / 1000)
dt = CHINA_TZ.localize(dt)
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=local_id,
direction=Direction.NET,
price=i.limit_price if i.limit_price else 0.0,
volume=i.quantity,
traded=i.filled,
status=STATUS_TIGER2VT[i.status],
datetime=dt,
gateway_name=self.gateway_name,
)
self.ID_TIGER2VT[str(i.order_id)] = local_id
self.on_order(order)
self.ID_VT2TIGER = {v: k for k, v in self.ID_TIGER2VT.items()}
def process_deal(self, data):
"""
Process trade data for both query and update.
"""
for i in data:
if i.status == OrderStatus.PARTIALLY_FILLED or i.status == OrderStatus.FILLED:
symbol, exchange = convert_symbol_tiger2vt(str(i.contract))
self.tradeid += 1
dt = datetime.fromtimestamp(i.trade_time / 1000)
dt = CHINA_TZ.localize(dt)
trade = TradeData(
symbol=symbol,
exchange=exchange,
direction=Direction.NET,
tradeid=self.tradeid,
orderid=self.ID_TIGER2VT[str(i.order_id)],
price=i.avg_fill_price,
volume=i.filled,
datetime=dt,
gateway_name=self.gateway_name,
)
self.on_trade(trade)
@functools.lru_cache()
def convert_symbol_tiger2vt(symbol):
"""
    Convert symbol from tiger to vt.
"""
if symbol.encode("UTF-8").isalpha():
exchange = Exchange.SMART
else:
if len(symbol) < 6:
exchange = Exchange.SEHK
elif symbol.startswith("6"):
exchange = Exchange.SSE
elif symbol.endswith(".SH"):
exchange = Exchange.SSE
symbol = symbol.strip(".SH")
else:
exchange = Exchange.SZSE
return symbol, exchange
@functools.lru_cache()
def convert_symbol_vt2tiger(symbol, exchange):
"""
Convert symbol from vt to tiger.
"""
if exchange == Exchange.SSE and symbol.startswith("0"):
symbol = symbol + ".SH"
else:
symbol = symbol
return symbol
@functools.lru_cache()
def config_symbol_currency(symbol):
"""
Config symbol to corresponding currency
"""
if symbol.encode("UTF-8").isalpha():
currency = Currency.USD
else:
if len(symbol) < 6:
currency = Currency.HKD
else:
currency = Currency.CNH
return currency
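# Illustrative sketch (not part of vn.py): expected behaviour of the
# module-level symbol helpers defined above. The sample symbols are
# arbitrary examples, not recommendations.
if __name__ == "__main__":
    print(convert_symbol_tiger2vt("AAPL"))     # ('AAPL', Exchange.SMART)
    print(convert_symbol_tiger2vt("00700"))    # ('00700', Exchange.SEHK)
    print(convert_symbol_tiger2vt("600036"))   # ('600036', Exchange.SSE)
    print(config_symbol_currency("00700"))     # Currency.HKD
    print(config_symbol_currency("600036"))    # Currency.CNH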
| mit |
berlinguyinca/spectra-hash | utilities/splash-analysis/scripts/splash_stats.py | 1 | 11806 | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
header_rows =[
'origin1' ,'origin2', 'hash_match',
'nominal_sim', 'nominal_stein_sim', 'accurate_sim', 'accurate_stein_sim',
'shist_manhattan', 'shist_cmanhattan', 'shist_levenshtein', 'shist_chi2', 'shist_bhattacharyya', 'shist_sim',
'lhist_manhattan', 'lhist_cmanhattan', 'lhist_levenshtein', 'lhist_chi2', 'lhist_bhattacharyya', '', 'lhist_sim',
'sum_sim', 'asum_sim'
]
FILENAME = 'highsim_accurate_56.csv'
FILENAME = '20151105/highsim'
df = pd.read_csv(FILENAME, names = header_rows, index_col = [0, 1])
def bin_data(x, y, bins = 25, err_scale = 0.25):
    """Bin y by x over [0.95, 1.0]; return bin centers, bin means and spreads scaled by len**-err_scale."""
t = np.linspace(0.95, 1.0, bins + 1)
y_mean, y_std = [], []
for i in range(bins):
data = y[(x >= t[i]) & (x < t[i + 1])]
y_mean.append(np.mean(data))
y_std.append(np.std(data) / len(data)**err_scale)
t = [(t[i] + t[i + 1]) / 2 for i in range(bins)]
return t, np.array(y_mean), np.array(y_std)
with PdfPages('plots.pdf') as pdf:
plt.figure()
plt.title('Similarity Distribution')
plt.xlabel('Spectral Similarity Score')
plt.ylabel('N')
plt.hist(df.accurate_sim, bins = 15, histtype = 'step', label = 'Dot Product')
plt.hist(df.accurate_stein_sim, bins = 15, histtype = 'step', label = 'Transformed Dot Product - Short Histogram')
min(min(df.accurate_sim), min(df.accurate_stein_sim))
plt.xlim((min(min(df.accurate_sim), min(df.accurate_stein_sim)), max(max(df.accurate_sim), max(df.accurate_stein_sim))))
plt.legend(loc = 'upper left', fontsize = 'x-small')
pdf.savefig()
plt.figure()
plt.title('Levenshtein Distance')
plt.xlabel('Spectral Similarity Score')
plt.ylabel('Levenshtein Distance')
t, y, std = bin_data(df.accurate_sim, df.shist_levenshtein, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.shist_levenshtein, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_sim, df.lhist_levenshtein, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Long Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.lhist_levenshtein, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Long Histogram')
plt.xlim((min(t), max(t)))
plt.legend(loc = 'upper right', fontsize = 'x-small')
pdf.savefig()
plt.figure()
plt.title('Manhattan Distance')
plt.xlabel('Spectral Similarity Score')
plt.ylabel('Manhattan Distance')
t, y, std = bin_data(df.accurate_sim, df.shist_manhattan, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.shist_manhattan, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_sim, df.lhist_manhattan, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Long Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.lhist_manhattan, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Long Histogram')
plt.xlim((min(t), max(t)))
plt.legend(loc = 'upper right', fontsize = 'x-small')
pdf.savefig()
plt.figure()
plt.title('Manhattan Similarity')
plt.xlabel('Spectral Similarity Score')
plt.ylabel('Manhattan Similarity')
t, y, std = bin_data(df.accurate_sim, 1 - df.shist_manhattan / 36 / 10, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_stein_sim, 1 - df.shist_manhattan / 36 / 10, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_sim, 1 - df.lhist_manhattan / 36 / 20, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Long Histogram')
t, y, std = bin_data(df.accurate_stein_sim, 1 - df.lhist_manhattan / 36 / 20, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Long Histogram')
plt.xlim((min(t), max(t)))
plt.legend(loc = 'lower right', fontsize = 'x-small')
pdf.savefig()
plt.figure()
plt.title(r'$\chi^2$ Distance')
plt.xlabel('Spectral Similarity Score')
plt.ylabel(r'$\chi^2$ Distance')
t, y, std = bin_data(df.accurate_sim, df.shist_chi2, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.shist_chi2, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_sim, df.lhist_chi2, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Long Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.lhist_chi2, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Long Histogram')
plt.xlim((min(t), max(t)))
plt.legend(loc = 'upper right', fontsize = 'x-small')
pdf.savefig()
plt.figure()
plt.title('Bhattacharyya Distance')
plt.xlabel('Spectral Similarity Score')
plt.ylabel('Bhattacharyya Distance')
t, y, std = bin_data(df.accurate_sim, df.shist_bhattacharyya, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.shist_bhattacharyya, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_sim, df.lhist_bhattacharyya, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Long Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.lhist_bhattacharyya, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Long Histogram')
plt.xlim((min(t), max(t)))
plt.legend(loc = 'upper right', fontsize = 'x-small')
pdf.savefig()
plt.figure()
plt.title('Histogram Dot Product')
plt.xlabel('Spectral Similarity Score')
plt.ylabel('Histogram Dot Product')
t, y, std = bin_data(df.accurate_sim, df.shist_sim, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.shist_sim, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Short Histogram')
t, y, std = bin_data(df.accurate_sim, df.lhist_sim, 25)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Long Histogram')
t, y, std = bin_data(df.accurate_stein_sim, df.lhist_sim, 25)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Long Histogram')
plt.xlim((min(t), max(t)))
plt.legend(loc = 'lower right', fontsize = 'x-small')
pdf.savefig()
plt.figure()
plt.title('Spectrum Sum Difference')
plt.xlabel('Spectral Similarity Score')
plt.ylabel('Spectrum Sum Difference')
t, y, std = bin_data(df.accurate_sim, df.sum_sim, 25, 0.125)
plt.errorbar(t, y, yerr = std, label = 'Dot Product - Sum')
t, y, std = bin_data(df.accurate_stein_sim, df.sum_sim, 25, 0.125)
plt.errorbar(t, y, yerr = std, label = 'Transformed Dot Product - Sum')
plt.xlim((min(t), max(t)))
plt.legend(loc = 'upper right', fontsize = 'x-small')
pdf.savefig()
nist_msms_28047 = '78.9591:999.00 80.9633:2.80 96.9697:57.34 134.9854:5.19 150.9803:6.39 152.9959:171.43 153.9992:5.59 171.0064:17.48 237.2223:7.69 238.2259:1.10 255.2330:162.44 256.2363:25.67 257.2393:1.80'
nist_msms_28272 = '56.3603:1.80 78.9590:999.00 79.9631:2.70 80.9632:6.69 83.0502:4.60 96.9696:69.93 121.1020:6.79 134.9854:8.69 135.1180:9.19 147.1177:2.70 150.9801:7.59 152.9958:346.15 153.9992:19.08 155.0001:2.90 161.1333:2.80 163.0761:2.00 171.0062:66.43 172.0093:2.00 209.0222:2.00 227.0320:7.39 237.2224:7.09 255.2327:425.67 256.2361:162.14 257.2394:5.99 283.2425:4.30'
nist_msms_27496 = '81.0696:6.19 83.0488:1.60 93.0695:2.00 95.0852:8.39 97.0644:46.25 97.1009:4.30 107.0853:4.50 109.0645:8.59 109.1009:14.09 119.0852:3.30 121.0646:2.70 121.1010:8.39 123.0801:3.20 123.1167:2.80 125.0958:4.70 127.1114:4.80 131.0852:3.10 133.1009:4.30 135.1166:6.79 137.0957:3.30 143.0852:2.30 143.1064:3.70 145.1009:5.00 147.0801:2.00 147.1166:5.59 149.0959:4.80 149.1323:3.10 157.1009:7.39 159.1165:6.59 161.0958:6.19 161.1322:3.90 163.1114:5.89 163.1478:1.80 167.1427:2.60 171.1166:3.30 173.0957:4.70 173.1321:4.40 175.1113:8.29 175.1477:2.60 177.1270:46.95 177.1632:2.90 185.1320:3.20 187.1479:2.10 189.1271:3.40 189.1634:2.50 191.1427:1.70 191.1790:4.60 199.1479:1.60 201.1633:3.20 211.1477:1.90 215.1429:2.80 219.1740:4.20 229.1584:3.20 235.1688:3.00 237.1844:10.09 239.1792:1.70 241.1581:2.20 243.1738:7.79 249.1844:3.10 251.1790:4.20 253.1948:5.89 255.1737:5.79 257.1895:3.70 259.2053:3.40 267.1740:5.49 269.1895:52.45 271.2051:18.88 277.2156:43.66 279.2100:1.80 283.2053:4.00 285.2207:7.79 287.2001:1.80 289.2157:3.10 295.2049:4.20 297.2206:29.77 299.2359:5.19 309.2566:1.70 317.2466:6.39 337.2516:3.60 349.2881:4.80 355.2625:7.49 359.2725:3.40 367.2988:83.82 377.2832:89.21 395.2937:432.37 413.3042:999.00'
nist_msms_27497 = '67.0538:3.30 69.0695:5.59 79.0537:2.80 81.0695:32.77 83.0487:11.79 83.0850:7.39 91.0536:2.40 93.0694:10.69 95.0852:47.15 97.0644:141.06 97.1008:24.58 105.0694:9.39 107.0852:26.67 109.0645:36.46 109.1009:56.04 111.0800:2.30 111.1162:3.50 119.0853:20.18 121.0645:13.49 121.1009:35.66 123.0801:17.18 123.1166:14.69 125.0958:14.39 127.1113:12.79 131.0852:13.89 133.1009:22.88 135.0799:2.10 135.1166:32.57 137.0958:14.89 137.1319:4.10 139.1112:3.20 143.0851:10.99 143.1063:10.29 145.1009:25.87 147.0800:10.09 147.1166:25.27 149.0958:22.08 149.1321:19.28 151.1112:5.59 155.0849:6.99 157.1008:23.68 159.1165:28.27 161.0958:25.37 161.1322:17.78 163.1114:25.07 163.1476:9.59 165.1268:7.09 167.1425:5.99 169.1007:8.19 171.1164:15.88 173.0958:21.18 173.1321:22.08 175.1114:43.86 175.1476:14.09 177.1270:193.71 177.1632:14.89 183.1163:6.89 185.1319:13.99 187.1112:7.29 187.1476:12.49 189.1270:18.18 189.1632:12.79 191.1425:10.79 191.1791:19.88 195.1374:5.00 197.1321:7.39 199.1476:8.29 201.1268:7.09 201.1634:14.29 203.1425:5.00 203.1786:2.40 205.1581:3.10 209.1322:2.00 211.1476:8.49 213.1634:5.49 215.1427:9.09 217.1581:4.20 219.1738:8.99 223.1689:3.80 225.1633:4.50 227.1426:7.29 229.1584:22.08 231.1739:6.49 235.1688:10.49 237.1634:2.10 237.1844:21.68 239.1789:9.89 241.1582:11.59 241.1946:7.29 243.1739:21.38 249.1846:7.29 251.1789:16.68 253.1946:20.48 255.1738:19.58 257.1894:10.99 259.2051:9.99 263.2000:4.20 267.1737:17.18 269.1895:152.65 270.1975:4.00 271.2051:54.15 273.2209:2.00 277.2156:127.37 279.2101:9.19 281.1896:4.10 281.2259:4.60 283.2050:13.49 285.1842:3.30 285.2207:22.08 287.2001:7.09 287.2363:4.30 289.2156:7.49 295.2049:13.29 297.2207:82.62 299.2362:12.59 309.2569:7.19 311.2362:2.90 313.2159:2.00 313.2515:2.10 317.2469:10.29 329.2466:2.20 331.2626:2.80 337.2519:10.09 349.2880:15.08 355.2624:21.28 359.2728:8.79 367.2988:249.65 377.2832:168.03 395.2937:641.26 413.3042:999.00'
plt.figure()
ax = plt.subplot(2, 1, 1)
plt.title('High Spectral Similarity, Low Histogram Similarity')
for ion in nist_msms_27496.split():
mz, intensity = map(float, ion.split(':'))
plt.plot([mz, mz], [0, intensity], 'b-')
ax.text(100, 900, 'NIST14 MSMS 27496', fontsize = 14)
text = 'Dot Product: 0.954908\n'
text += 'Transformed Dot Product: 0.972003\n\n'
text += 'Manhattan Distance: 67\n'
text += 'Manhattan Similarity: 0.813888\n'
text += 'Histogram Dot Product: 0.849184\n\n'
text += 'Long Histogram Manhattan Distance: 70\n'
text += 'Long Histogram Manhattan Similarity: 0.902777\n'
text += 'Long Histogram Histogram Dot Product: 0.884707\n'
ax.text(100, 200, text, fontsize = 8)
plt.ylim((0, 1050))
plt.ylabel('Intensity')
ax = plt.subplot(2, 1, 2)
for ion in nist_msms_27497.split():
mz, intensity = map(float, ion.split(':'))
plt.plot([mz, mz], [0, intensity], 'b-')
ax.text(100, 900, 'NIST14 MSMS 27497', fontsize = 14)
plt.ylim((0, 1050))
plt.xlabel('m/z')
plt.ylabel('Intensity')
pdf.savefig() | bsd-3-clause |
moreati/pandashells | pandashells/test/p_cdf_tests.py | 10 | 1185 | #! /usr/bin/env python
from mock import patch
from unittest import TestCase
import pandas as pd
from pandashells.bin.p_cdf import main
class MainTests(TestCase):
@patch(
'pandashells.bin.p_cdf.sys.argv',
'p.cdf -c x -q -n 10'.split())
@patch('pandashells.bin.p_cdf.io_lib.df_to_output')
@patch('pandashells.bin.p_cdf.io_lib.df_from_input')
def test_cli_quiet(self, df_from_input_mock, df_to_output_mock):
df_in = pd.DataFrame({
'x': range(1, 101)
})
df_from_input_mock.return_value = df_in
main()
df_out = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(list(df_out.columns), ['x', 'p_less', 'p_greater'])
self.assertEqual(len(df_out), 10)
@patch(
'pandashells.bin.p_cdf.sys.argv',
'p.cdf -c x -n 10'.split())
@patch('pandashells.bin.p_cdf.plot_lib.show')
@patch('pandashells.bin.p_cdf.io_lib.df_from_input')
def test_cli(self, df_from_input_mock, show_mock):
df_in = pd.DataFrame({
'x': range(1, 101)
})
df_from_input_mock.return_value = df_in
main()
self.assertTrue(show_mock.called)
| bsd-2-clause |
thomasgibson/tabula-rasa | HDG_CG_comp/run_cg.py | 1 | 6794 | from argparse import ArgumentParser
from collections import defaultdict
from firedrake import COMM_WORLD, parameters
from firedrake.petsc import PETSc
from mpi4py import MPI
import os
import pandas as pd
import cg_problem as module
parameters["pyop2_options"]["lazy_evaluation"] = False
parser = ArgumentParser(description="""Profile CG solver.""",
add_help=False)
parser.add_argument("--results_file", action="store",
default="results/CG_data",
help="Where to put the results.")
parser.add_argument("--dim", action="store", default=3,
type=int, choices=[2, 3], help="Problem dimension.")
parser.add_argument("--quads", action="store_true",
help="Use quadrilateral elements")
parser.add_argument("--help", action="store_true", help="Show help.")
args, _ = parser.parse_known_args()
if args.help:
import sys
help = parser.format_help()
PETSc.Sys.Print("%s\n" % help)
sys.exit(0)
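# Illustrative usage (not part of the original script): this profiler is
# typically launched under MPI, for example
#
#   mpiexec -n 4 python run_cg.py --dim 3 --quads --results_file results/CG_data
#
# The flags match the argparse options defined above; the process count of 4
# is an arbitrary example.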
results = os.path.abspath(args.results_file)
warm = defaultdict(bool)
PETSc.Log.begin()
def run_solver(problem_cls, degree, size, rtol, quads, dim, cold=False):
params = {"ksp_type": "cg",
"ksp_rtol": rtol,
"pc_type": "hypre",
"pc_hypre_type": "boomeramg",
"pc_hypre_boomeramg_strong_threshold": 0.75,
"pc_hypre_boomeramg_agg_nl": 2}
problem = problem_cls(degree=degree, N=size,
quadrilaterals=quads, dimension=dim)
name = getattr(problem, "name")
solver = problem.solver(parameters=params)
if cold:
PETSc.Sys.Print("""
Running cold solve on coarse mesh for degree %d.\n
""" % degree)
solver.solve()
return
PETSc.Sys.Print("""
\nSolving problem: %s.\n
Approximation degree: %s\n
Problem size: %s ^ %s\n
Quads: %s\n
""" % (name, problem.degree, problem.N, problem.dim, problem.quads))
if not warm[(name, degree, size)]:
PETSc.Sys.Print("Warmup solve\n")
problem.u.assign(0)
with PETSc.Log.Stage("Warmup..."):
solver.solve()
warm[(name, degree, size)] = True
problem.u.assign(0)
PETSc.Sys.Print("Timed solve...")
solver.snes.setConvergenceHistory()
solver.snes.ksp.setConvergenceHistory()
warm_stage = "%s(deg=%s, N=%s, dim=%s) Warm solve\n" % (name,
degree,
size,
dim)
with PETSc.Log.Stage(warm_stage):
solver.solve()
snes = PETSc.Log.Event("SNESSolve").getPerfInfo()
ksp = PETSc.Log.Event("KSPSolve").getPerfInfo()
pcsetup = PETSc.Log.Event("PCSetUp").getPerfInfo()
pcapply = PETSc.Log.Event("PCApply").getPerfInfo()
jac_eval = PETSc.Log.Event("SNESJacobianEval").getPerfInfo()
residual = PETSc.Log.Event("SNESFunctionEval").getPerfInfo()
comm = problem.comm
snes_time = comm.allreduce(snes["time"], op=MPI.SUM) / comm.size
ksp_time = comm.allreduce(ksp["time"], op=MPI.SUM) / comm.size
pcsetup_time = comm.allreduce(pcsetup["time"], op=MPI.SUM) / comm.size
pcapply_time = comm.allreduce(pcapply["time"], op=MPI.SUM) / comm.size
jac_time = comm.allreduce(jac_eval["time"], op=MPI.SUM) / comm.size
res_time = comm.allreduce(residual["time"], op=MPI.SUM) / comm.size
num_cells = comm.allreduce(problem.mesh.cell_set.size, op=MPI.SUM)
err = problem.err
true_err = problem.true_err
if COMM_WORLD.rank == 0:
if not os.path.exists(os.path.dirname(results)):
os.makedirs(os.path.dirname(results))
data = {"SNESSolve": snes_time,
"KSPSolve": ksp_time,
"PCSetUp": pcsetup_time,
"PCApply": pcapply_time,
"SNESJacobianEval": jac_time,
"SNESFunctionEval": res_time,
"num_processes": problem.comm.size,
"mesh_size": problem.N,
"num_cells": num_cells,
"degree": problem.degree,
"dofs": problem.u.dof_dset.layout_vec.getSize(),
"name": problem.name,
"disc_error": err,
"true_err": true_err,
"ksp_iters": solver.snes.ksp.getIterationNumber()}
df = pd.DataFrame(data, index=[0])
if problem.quads:
result_file = results + "_N%d_deg%d_quads.csv" % (problem.N,
problem.degree)
else:
result_file = results + "_N%d_deg%d.csv" % (problem.N,
problem.degree)
df.to_csv(result_file, index=False, mode="w", header=True)
PETSc.Sys.Print("Solving %s(deg=%s, N=%s, dim=%s) finished.\n" %
(name, problem.degree, problem.N, problem.dim))
PETSc.Sys.Print("L2 error: %s\n" % true_err)
PETSc.Sys.Print("Algebraic error: %s\n" % err)
dim = args.dim
if dim == 3:
# (degree, size, rtol) NOTE: rtol is chosen such that the
# iterative solver reaches the minimal algebraic error
# so that we avoid "oversolving"
cg_params = [(2, 4, 1.0e-4),
(2, 8, 1.0e-5),
(2, 16, 1.0e-6),
(2, 32, 1.0e-7),
(2, 64, 1.0e-8),
(2, 128, 1.0e-9),
# Degree 3 set
(3, 4, 1.0e-6),
(3, 8, 1.0e-7),
(3, 16, 1.0e-8),
(3, 32, 1.0e-9),
(3, 64, 1.0e-10),
(3, 128, 1.0e-11),
# Degree 4 set
(4, 4, 1.0e-8),
(4, 8, 1.0e-9),
(4, 16, 1.0e-10),
(4, 32, 1.0e-11),
(4, 64, 1.0e-12)]
cold_params = [(2, 4, 1.0e-4),
(3, 4, 1.0e-6),
(4, 4, 1.0e-8)]
else:
# If a 2D run is desired, we can set one up.
raise NotImplementedError("Dim %s not set up yet." % dim)
problem_cls = module.CGProblem
quads = args.quads
for cold_param in cold_params:
degree, size, rtol = cold_param
run_solver(problem_cls=problem_cls, degree=degree,
size=size, rtol=rtol, quads=quads, dim=dim,
cold=True)
# Now we profile once the code has been generated
for cg_param in cg_params:
degree, size, rtol = cg_param
run_solver(problem_cls=problem_cls, degree=degree,
size=size, rtol=rtol, quads=quads, dim=dim,
cold=False)
| mit |
lehinevych/Dato-Core | src/unity/python/graphlab/data_structures/sframe.py | 13 | 196438 | """
This module defines the SFrame class which provides the
ability to create, access and manipulate a remote scalable dataframe object.
SFrame acts similarly to pandas.DataFrame, but the data is completely immutable
and is stored column wise on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import graphlab.connect as _mt
import graphlab.connect.main as glconnect
from graphlab.cython.cy_type_utils import infer_type_of_list
from graphlab.cython.context import debug_trace as cython_context
from graphlab.cython.cy_sframe import UnitySFrameProxy
from graphlab.util import _check_canvas_enabled, _make_internal_url, _is_callable
from graphlab.data_structures.sarray import SArray, _create_sequential_sarray
import graphlab.aggregate
import graphlab
import array
from prettytable import PrettyTable
from textwrap import wrap
import datetime
import inspect
from graphlab.deps import pandas, HAS_PANDAS
import time
import itertools
import os
import subprocess
import uuid
import platform
__all__ = ['SFrame']
SFRAME_GARBAGE_COLLECTOR = []
FOOTER_STRS = ['Note: Only the head of the SFrame is printed.',
'You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.']
LAZY_FOOTER_STRS = ['Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.',
'You can use len(sf) to force materialization.']
SFRAME_ROOTS = [# Binary/lib location in production egg
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..')),
# Build tree location of SFrame binaries
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'sframe')),
# Location of python sources
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'unity', 'python', 'graphlab')),
# Build tree dependency location
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', '..', '..', 'deps', 'local', 'lib'))
]
RDD_SFRAME_PICKLE = "rddtosf_pickle"
RDD_SFRAME_NONPICKLE = "rddtosf_nonpickle"
SFRAME_RDD_PICKLE = "sftordd_pickle"
HDFS_LIB = "libhdfs.so"
RDD_JAR_FILE = "graphlab-create-spark-integration.jar"
SYS_UTIL_PY = "sys_util.py"
RDD_SUPPORT_INITED = False
BINARY_PATHS = {}
STAGING_DIR = None
RDD_SUPPORT = True
PRODUCTION_RUN = False
YARN_OS = None
SPARK_SUPPORT_NAMES = {'RDD_SFRAME_PATH':'rddtosf_pickle',
'RDD_SFRAME_NONPICKLE_PATH':'rddtosf_nonpickle',
'SFRAME_RDD_PATH':'sftordd_pickle',
'HDFS_LIB_PATH':'libhdfs.so',
'RDD_JAR_PATH':'graphlab-create-spark-integration.jar',
'SYS_UTIL_PY_PATH':'sys_util.py',
'SPARK_PIPE_WRAPPER_PATH':'spark_pipe_wrapper'}
first = True
for i in SFRAME_ROOTS:
for key,val in SPARK_SUPPORT_NAMES.iteritems():
tmp_path = os.path.join(i, val)
if key not in BINARY_PATHS and os.path.isfile(tmp_path):
BINARY_PATHS[key] = tmp_path
if all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
if first:
PRODUCTION_RUN = True
break
first = False
if not all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
RDD_SUPPORT = False
def get_spark_integration_jar_path():
"""
The absolute path of the jar file required to enable GraphLab Create's
integration with Apache Spark.
"""
if 'RDD_JAR_PATH' not in BINARY_PATHS:
raise RuntimeError("Could not find a spark integration jar. "\
"Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?")
return BINARY_PATHS['RDD_JAR_PATH']
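# Illustrative sketch (not part of GraphLab Create): building a spark-submit
# command that places the integration jar on the driver classpath, as the
# error message in __rdd_support_init__ below requires. ``app`` is a
# placeholder script name; setting spark.driver.extraClassPath in
# spark-defaults.conf is an equivalent alternative.
def _example_spark_submit_command(app="my_app.py"):
    jar = get_spark_integration_jar_path()
    return "spark-submit --driver-class-path {0} {1}".format(jar, app)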
def __rdd_support_init__(sprk_ctx):
global YARN_OS
global RDD_SUPPORT_INITED
global STAGING_DIR
global BINARY_PATHS
if not RDD_SUPPORT or RDD_SUPPORT_INITED:
return
# Make sure our GraphLabUtil scala functions are accessible from the driver
try:
tmp = sprk_ctx._jvm.org.graphlab.create.GraphLabUtil.EscapeString(sprk_ctx._jvm.java.lang.String("1,2,3,4"))
except:
raise RuntimeError("Could not execute RDD translation functions. "\
"Please make sure you have started Spark "\
"(either with spark-submit or pyspark) with the following flag set:\n"\
"'--driver-class-path " + BINARY_PATHS['RDD_JAR_PATH']+"'\n"\
"OR set the property spark.driver.extraClassPath in spark-defaults.conf")
dummy_rdd = sprk_ctx.parallelize([1])
if PRODUCTION_RUN and sprk_ctx.master == 'yarn-client':
# Get cluster operating system
os_rdd = dummy_rdd.map(lambda x: platform.system())
YARN_OS = os_rdd.collect()[0]
# Set binary path
for i in BINARY_PATHS.keys():
s = BINARY_PATHS[i]
if os.path.basename(s) == SPARK_SUPPORT_NAMES['SYS_UTIL_PY_PATH']:
continue
if YARN_OS == 'Linux':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'linux', os.path.basename(s))
elif YARN_OS == 'Darwin':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'osx', os.path.basename(s))
else:
raise RuntimeError("YARN cluster has unsupported operating system "\
"(something other than Linux or Mac OS X). "\
"Cannot convert RDDs on this cluster to SFrame.")
# Create staging directory
staging_dir = '.graphlabStaging'
if sprk_ctx.master == 'yarn-client':
tmp_loc = None
# Get that staging directory's full name
tmp_loc = dummy_rdd.map(
lambda x: subprocess.check_output(
["hdfs", "getconf", "-confKey", "fs.defaultFS"]).rstrip()).collect()[0]
STAGING_DIR = os.path.join(tmp_loc, "user", sprk_ctx.sparkUser(), staging_dir)
if STAGING_DIR is None:
raise RuntimeError("Failed to create a staging directory on HDFS. "\
"Do your cluster nodes have a working hdfs client?")
# Actually create the staging dir
unity = glconnect.get_unity()
unity.__mkdir__(STAGING_DIR)
unity.__chmod__(STAGING_DIR, 0777)
elif sprk_ctx.master[0:5] == 'local':
# Save the output sframes to the same temp workspace this engine is
# using
#TODO: Consider cases where server and client aren't on the same machine
unity = glconnect.get_unity()
STAGING_DIR = unity.get_current_cache_file_location()
if STAGING_DIR is None:
raise RuntimeError("Could not retrieve local staging directory! \
Please contact us on http://forum.dato.com.")
else:
raise RuntimeError("Your spark context's master is '" +
str(sprk_ctx.master) +
"'. Only 'local' and 'yarn-client' are supported.")
if sprk_ctx.master == 'yarn-client':
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_PATH'])
sprk_ctx.addFile(BINARY_PATHS['HDFS_LIB_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SFRAME_RDD_PATH'])
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SYS_UTIL_PY_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'])
sprk_ctx._jsc.addJar(BINARY_PATHS['RDD_JAR_PATH'])
RDD_SUPPORT_INITED = True
def load_sframe(filename):
"""
Load an SFrame. The filename extension is used to determine the format
automatically. This function is particularly useful for SFrames previously
saved in binary format. For CSV imports the ``SFrame.read_csv`` function
provides greater control. If the SFrame is in binary format, ``filename`` is
actually a directory, created when the SFrame is saved.
Parameters
----------
filename : string
Location of the file to load. Can be a local path or a remote URL.
Returns
-------
out : SFrame
See Also
--------
SFrame.save, SFrame.read_csv
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.save('my_sframe') # 'my_sframe' is a directory
>>> sf_loaded = graphlab.load_sframe('my_sframe')
"""
sf = SFrame(data=filename)
return sf
class SFrame(object):
"""
A tabular, column-mutable dataframe object that can scale to big data. The
data in SFrame is stored column-wise on the GraphLab Server side, and is
stored on persistent storage (e.g. disk) to avoid being constrained by
memory size. Each column in an SFrame is a size-immutable
:class:`~graphlab.SArray`, but SFrames are mutable in that columns can be
added and subtracted with ease. An SFrame essentially acts as an ordered
dict of SArrays.
Currently, we support constructing an SFrame from the following data
formats:
* csv file (comma separated value)
* sframe directory archive (A directory where an sframe was saved
previously)
* general text file (with csv parsing options, See :py:meth:`read_csv()`)
* a Python dictionary
* pandas.DataFrame
* JSON
* Apache Avro
* PySpark RDD
and from the following sources:
* your local file system
* the GraphLab Server's file system
* HDFS
* Amazon S3
* HTTP(S).
Only basic examples of construction are covered here. For more information
and examples, please see the `User Guide <https://dato.com/learn/user
guide/index.html#Working_with_data_Tabular_data>`_, `API Translator
<https://dato.com/learn/translator>`_, `How-Tos
<https://dato.com/learn/how-to>`_, and data science `Gallery
<https://dato.com/learn/gallery>`_.
Parameters
----------
data : array | pandas.DataFrame | string | dict, optional
The actual interpretation of this field is dependent on the ``format``
parameter. If ``data`` is an array or Pandas DataFrame, the contents are
stored in the SFrame. If ``data`` is a string, it is interpreted as a
file. Files can be read from local file system or urls (local://,
hdfs://, s3://, http://).
format : string, optional
Format of the data. The default, "auto" will automatically infer the
input data format. The inference rules are simple: If the data is an
array or a dataframe, it is associated with 'array' and 'dataframe'
respectively. If the data is a string, it is interpreted as a file, and
the file extension is used to infer the file format. The explicit
options are:
- "auto"
- "array"
- "dict"
- "sarray"
- "dataframe"
- "csv"
- "tsv"
- "sframe".
See Also
--------
read_csv:
Create a new SFrame from a csv file. Preferred for text and CSV formats,
because it has a lot more options for controlling the parser.
save : Save an SFrame for later use.
Notes
-----
- When working with the GraphLab EC2 instance (see
:py:func:`graphlab.aws.launch_EC2()`), an SFrame cannot be constructed
using local file path, because it involves a potentially large amount of
data transfer from client to server. However, it is still okay to use a
remote file path. See the examples below. A similar restriction applies to
:py:class:`graphlab.SGraph` and :py:class:`graphlab.SArray`.
- When reading from HDFS on Linux we must guess the location of your java
installation. By default, we will use the location pointed to by the
JAVA_HOME environment variable. If this is not set, we check many common
installation paths. You may use two environment variables to override
this behavior. GRAPHLAB_JAVA_HOME allows you to specify a specific java
installation and overrides JAVA_HOME. GRAPHLAB_LIBJVM_DIRECTORY
overrides all and expects the exact directory that your preferred
libjvm.so file is located. Use this ONLY if you'd like to use a
non-standard JVM.
Examples
--------
>>> import graphlab
>>> from graphlab import SFrame
**Construction**
Construct an SFrame from a dataframe and transfers the dataframe object
across the network.
>>> df = pandas.DataFrame()
>>> sf = SFrame(data=df)
Construct an SFrame from a local csv file (only works for local server).
>>> sf = SFrame(data='~/mydata/foo.csv')
Construct an SFrame from a csv file on Amazon S3. This requires the
environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be
set before the python session started. Alternatively, you can use
:py:func:`graphlab.aws.set_credentials()` to set the credentials after
python is started and :py:func:`graphlab.aws.get_credentials()` to verify
these environment variables.
>>> sf = SFrame(data='s3://mybucket/foo.csv')
Read from HDFS using a specific java installation (environment variable
only applies when using Linux)
>>> import os
>>> os.environ['GRAPHLAB_JAVA_HOME'] = '/my/path/to/java'
>>> from graphlab import SFrame
>>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt")
An SFrame can be constructed from a dictionary of values or SArrays:
>>> sf = gl.SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
Or equivalently:
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame({'id':ids,'val':vals})
It can also be constructed from an array of SArrays in which case column
names are automatically assigned.
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame([ids, vals])
>>> sf
Columns:
X1 int
X2 str
Rows: 3
Data:
X1 X2
0 1 A
1 2 B
2 3 C
If the SFrame is constructed from a list of values, an SFrame of a single
column is constructed.
>>> sf = SFrame([1,2,3])
>>> sf
Columns:
X1 int
Rows: 3
Data:
X1
0 1
1 2
2 3
**Parsing**
    The :py:func:`graphlab.SFrame.read_csv()` function is quite powerful and
    can be used to import a variety of row-based formats.
First, some simple cases:
>>> !cat ratings.csv
user_id,movie_id,rating
10210,1,1
10213,2,5
10217,2,2
10102,1,3
10109,3,4
10117,5,2
10122,2,4
10114,1,5
10125,1,1
>>> gl.SFrame.read_csv('ratings.csv')
Columns:
user_id int
movie_id int
rating int
Rows: 9
Data:
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 10210 | 1 | 1 |
| 10213 | 2 | 5 |
| 10217 | 2 | 2 |
| 10102 | 1 | 3 |
| 10109 | 3 | 4 |
| 10117 | 5 | 2 |
| 10122 | 2 | 4 |
| 10114 | 1 | 5 |
| 10125 | 1 | 1 |
+---------+----------+--------+
[9 rows x 3 columns]
    A delimiter other than "," can be specified, for instance the space ' '
    in this case. Only single character delimiters are supported.
>>> !cat ratings.csv
user_id movie_id rating
10210 1 1
10213 2 5
10217 2 2
10102 1 3
10109 3 4
10117 5 2
10122 2 4
10114 1 5
10125 1 1
>>> gl.SFrame.read_csv('ratings.csv', delimiter=' ')
By default, "NA" or a missing element are interpreted as missing values.
>>> !cat ratings2.csv
user,movie,rating
"tom",,1
harry,5,
jack,2,2
bill,,
>>> gl.SFrame.read_csv('ratings2.csv')
Columns:
user str
movie int
rating int
Rows: 4
Data:
+---------+-------+--------+
| user | movie | rating |
+---------+-------+--------+
| tom | None | 1 |
| harry | 5 | None |
| jack | 2 | 2 |
    | bill | None | None |
+---------+-------+--------+
[4 rows x 3 columns]
    Furthermore, thanks to the dictionary and list types, the parser can handle
    JSON-like formats.
>>> !cat ratings3.csv
business, categories, ratings
"Restaurant 1", [1 4 9 10], {"funny":5, "cool":2}
"Restaurant 2", [], {"happy":2, "sad":2}
"Restaurant 3", [2, 11, 12], {}
>>> gl.SFrame.read_csv('ratings3.csv')
Columns:
business str
categories array
ratings dict
Rows: 3
Data:
+--------------+--------------------------------+-------------------------+
| business | categories | ratings |
+--------------+--------------------------------+-------------------------+
| Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} |
| Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} |
| Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} |
+--------------+--------------------------------+-------------------------+
[3 rows x 3 columns]
    The list and dictionary parsers are quite flexible and can absorb a
    variety of loosely formatted inputs. Also, note that the list and dictionary
types are recursive, allowing for arbitrary values to be contained.
All these are valid lists:
>>> !cat interesting_lists.csv
list
[]
[1,2,3]
[1;2,3]
[1 2 3]
[{a:b}]
["c",d, e]
[[a]]
>>> gl.SFrame.read_csv('interesting_lists.csv')
Columns:
list list
Rows: 7
Data:
+-----------------+
| list |
+-----------------+
| [] |
| [1, 2, 3] |
| [1, 2, 3] |
| [1, 2, 3] |
| [{'a': 'b'}] |
| ['c', 'd', 'e'] |
| [['a']] |
+-----------------+
[7 rows x 1 columns]
All these are valid dicts:
>>> !cat interesting_dicts.csv
dict
{"classic":1,"dict":1}
{space:1 seperated:1}
{emptyvalue:}
{}
{:}
{recursive1:[{a:b}]}
{:[{:[a]}]}
>>> gl.SFrame.read_csv('interesting_dicts.csv')
Columns:
dict dict
Rows: 7
Data:
+------------------------------+
| dict |
+------------------------------+
| {'dict': 1, 'classic': 1} |
| {'seperated': 1, 'space': 1} |
| {'emptyvalue': None} |
| {} |
| {None: None} |
| {'recursive1': [{'a': 'b'}]} |
| {None: [{None: array('d')}]} |
+------------------------------+
[7 rows x 1 columns]
**Saving**
Save and load the sframe in native format.
>>> sf.save('mysframedir')
>>> sf2 = graphlab.load_sframe('mysframedir')
    **Column Manipulation**
An SFrame is composed of a collection of columns of SArrays, and individual
SArrays can be extracted easily. For instance given an SFrame:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The "id" column can be extracted using:
>>> sf["id"]
dtype: int
Rows: 3
[1, 2, 3]
And can be deleted using:
>>> del sf["id"]
Multiple columns can be selected by passing a list of column names:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]})
>>> sf
Columns:
id int
val str
val2 int
Rows: 3
Data:
id val val2
0 1 A 5
1 2 B 6
2 3 C 7
>>> sf2 = sf[['id','val']]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The same mechanism can be used to re-order columns:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[['val','id']]
>>> sf
Columns:
val str
id int
Rows: 3
Data:
val id
0 A 1
1 B 2
2 C 3
**Element Access and Slicing**
SFrames can be accessed by integer keys just like a regular python list.
    Such operations may not be fast on large datasets, so looping over an
    SFrame should be avoided.
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf[0]
{'id': 1, 'val': 'A'}
>>> sf[2]
{'id': 3, 'val': 'C'}
>>> sf[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sf[-1] # returns the last element
{'id': 3, 'val': 'C'}
>>> sf[-2] # returns the second to last element
{'id': 2, 'val': 'B'}
The SFrame also supports the full range of python slicing operators:
>>> sf[1000:] # Returns an SFrame containing rows 1000 to the end
>>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive
>>> sf[0:1000:2] # Returns an SFrame containing rows 0 to row 1000 in steps of 2
>>> sf[-100:] # Returns an SFrame containing last 100 rows
>>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2
**Logical Filter**
An SFrame can be filtered using
>>> sframe[binary_filter]
where sframe is an SFrame and binary_filter is an SArray of the same length.
The result is a new SFrame which contains only rows of the SFrame where its
    matching row in the binary_filter is non-zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance, given an SFrame
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
See :class:`~graphlab.SArray` for more details on the use of the logical
filter.
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)]
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
Or alternatively:
>>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)]
Create an SFrame from a Python dictionary.
>>> from graphlab import SFrame
>>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
"""
__slots__ = ['shape', '__proxy__', '_proxy']
def __init__(self, data=None,
format='auto',
_proxy=None):
"""__init__(data=list(), format='auto')
Construct a new SFrame from a url or a pandas.DataFrame.
"""
# emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http)
tracker = _mt._get_metric_tracker()
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySFrameProxy(glconnect.get_client())
_format = None
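        # Infer the input format from the ``data`` argument: a pandas
        # DataFrame, a path/URL (dispatched on its extension), an SArray,
        # another SFrame, a dict-like object, a generic iterable, or None
        # for an empty SFrame.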
if (format == 'auto'):
if (HAS_PANDAS and isinstance(data, pandas.DataFrame)):
_format = 'dataframe'
tracker.track('sframe.location.memory', value=1)
elif (isinstance(data, str) or isinstance(data, unicode)):
if data.find('://') == -1:
suffix = 'local'
else:
suffix = data.split('://')[0]
tracker.track(('sframe.location.%s' % (suffix)), value=1)
if data.endswith(('.csv', '.csv.gz')):
_format = 'csv'
elif data.endswith(('.tsv', '.tsv.gz')):
_format = 'tsv'
elif data.endswith(('.txt', '.txt.gz')):
print "Assuming file is csv. For other delimiters, " + \
"please use `SFrame.read_csv`."
_format = 'csv'
else:
_format = 'sframe'
elif type(data) == SArray:
_format = 'sarray'
elif isinstance(data, SFrame):
_format = 'sframe_obj'
elif (hasattr(data, 'iteritems')):
_format = 'dict'
tracker.track('sframe.location.memory', value=1)
elif hasattr(data, '__iter__'):
_format = 'array'
tracker.track('sframe.location.memory', value=1)
elif data is None:
_format = 'empty'
else:
raise ValueError('Cannot infer input type for data ' + str(data))
else:
_format = format
tracker.track(('sframe.format.%s' % _format), value=1)
with cython_context():
if (_format == 'dataframe'):
self.__proxy__.load_from_dataframe(data)
elif (_format == 'sframe_obj'):
for col in data.column_names():
self.__proxy__.add_column(data[col].__proxy__, col)
elif (_format == 'sarray'):
self.__proxy__.add_column(data.__proxy__, "")
elif (_format == 'array'):
if len(data) > 0:
unique_types = set([type(x) for x in data if x is not None])
if len(unique_types) == 1 and SArray in unique_types:
for arr in data:
self.add_column(arr)
elif SArray in unique_types:
raise ValueError("Cannot create SFrame from mix of regular values and SArrays")
else:
self.__proxy__.add_column(SArray(data).__proxy__, "")
elif (_format == 'dict'):
for key,val in iter(sorted(data.iteritems())):
if (type(val) == SArray):
self.__proxy__.add_column(val.__proxy__, key)
else:
self.__proxy__.add_column(SArray(val).__proxy__, key)
elif (_format == 'csv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter=',', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'tsv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter='\t', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'sframe'):
url = _make_internal_url(data)
self.__proxy__.load_from_sframe_index(url)
elif (_format == 'empty'):
pass
else:
raise ValueError('Unknown input type: ' + format)
sframe_size = -1
if self.__has_size__():
sframe_size = self.num_rows()
tracker.track('sframe.row.size', value=sframe_size)
tracker.track('sframe.col.size', value=self.num_cols())
@staticmethod
def _infer_column_types_from_lines(first_rows):
if (len(first_rows.column_names()) < 1):
print "Insufficient number of columns to perform type inference"
raise RuntimeError("Insufficient columns ")
if len(first_rows) < 1:
print "Insufficient number of rows to perform type inference"
raise RuntimeError("Insufficient rows")
# gets all the values column-wise
all_column_values_transposed = [list(first_rows[col])
for col in first_rows.column_names()]
# transpose
all_column_values = [list(x) for x in zip(*all_column_values_transposed)]
all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values]
# collect the hints
        # if the sampled rows do not all have the same number of elements,
        # give up and default every column to str
if len(set(len(x) for x in all_column_type_hints)) != 1:
print "Unable to infer column types. Defaulting to str"
return str
import types
column_type_hints = all_column_type_hints[0]
# now perform type combining across rows
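        # Promotion rules: identical types are kept as-is, int combined with
        # float promotes to float, array.array combined with list promotes to
        # list, NoneType defers to the other type, and any other disagreement
        # falls back to str.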
for i in range(1, len(all_column_type_hints)):
currow = all_column_type_hints[i]
for j in range(len(column_type_hints)):
# combine types
d = set([currow[j], column_type_hints[j]])
if (len(d) == 1):
# easy case. both agree on the type
continue
if ((int in d) and (float in d)):
# one is an int, one is a float. its a float
column_type_hints[j] = float
elif ((array.array in d) and (list in d)):
# one is an array , one is a list. its a list
column_type_hints[j] = list
elif types.NoneType in d:
# one is a NoneType. assign to other type
if currow[j] != types.NoneType:
column_type_hints[j] = currow[j]
else:
column_type_hints[j] = str
        # final pass: everything which is still NoneType becomes str
for i in range(len(column_type_hints)):
if column_type_hints[i] == types.NoneType:
column_type_hints[i] = str
return column_type_hints
@classmethod
def _read_csv_impl(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True,
store_errors=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and optionally
(if store_errors=True) a dict of filenames to SArrays
        indicating, for each file, which lines failed to parse.
Parameters
----------
store_errors : bool
If true, the output errors dict will be filled.
See `read_csv` for the rest of the parameters.
"""
parsing_config = dict()
parsing_config["delimiter"] = delimiter
parsing_config["use_header"] = header
parsing_config["continue_on_failure"] = not error_bad_lines
parsing_config["comment_char"] = comment_char
parsing_config["escape_char"] = escape_char
parsing_config["double_quote"] = double_quote
parsing_config["quote_char"] = quote_char
parsing_config["skip_initial_space"] = skip_initial_space
parsing_config["store_errors"] = store_errors
if type(na_values) is str:
na_values = [na_values]
if na_values is not None and len(na_values) > 0:
parsing_config["na_values"] = na_values
        if nrows is not None:
parsing_config["row_limit"] = nrows
proxy = UnitySFrameProxy(glconnect.get_client())
internal_url = _make_internal_url(url)
if (not verbose):
glconnect.get_client().set_log_progress(False)
        # Attempt to automatically detect the column types: either produce a
        # list of inferred types, or default every column to str.
column_type_inference_was_used = False
if column_type_hints is None:
try:
# Get the first 100 rows (using all the desired arguments).
first_rows = graphlab.SFrame.read_csv(url, nrows=100,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values = na_values)
column_type_hints = SFrame._infer_column_types_from_lines(first_rows)
typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']'
print "------------------------------------------------------"
                print "Inferred types from first 100 line(s) of file as "
print "column_type_hints="+ typelist
print "If parsing fails due to incorrect types, you can correct"
print "the inferred type list above and pass it to read_csv in"
print "the column_type_hints argument"
print "------------------------------------------------------"
column_type_inference_was_used = True
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
# If the above fails, default back to str for all columns.
column_type_hints = str
print 'Could not detect types. Using str for each column.'
if type(column_type_hints) is type:
type_hints = {'__all_columns__': column_type_hints}
elif type(column_type_hints) is list:
type_hints = dict(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints))
elif type(column_type_hints) is dict:
type_hints = column_type_hints
else:
raise TypeError("Invalid type for column_type_hints. Must be a dictionary, list or a single type.")
_mt._get_metric_tracker().track('sframe.csv.parse')
suffix=''
if url.find('://') == -1:
suffix = 'local'
else:
suffix = url.split('://')[0]
_mt._get_metric_tracker().track(('sframe.location.%s' % (suffix)), value=1)
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
if column_type_inference_was_used:
# try again
print "Unable to parse the file with automatic type inference."
print "Defaulting to column_type_hints=str"
type_hints = {'__all_columns__': str}
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except:
raise
else:
raise
glconnect.get_client().set_log_progress(True)
return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.iteritems() })
@classmethod
def read_csv_with_errors(cls,
url,
delimiter=',',
header=True,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict of filenames to SArrays
        indicating, for each file, which lines failed to parse.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
            is a dictionary of filenames to SArrays indicating, for each file,
            which lines failed to parse.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> (sf, bad_lines) = graphlab.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=True)
@classmethod
def read_csv(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names : 'X1, X2, ...'.
error_bad_lines : bool
If true, will fail upon encountering a bad line. If false, will
continue parsing skipping lines which fail to parse correctly.
A sample of the first 10 encountered bad lines will be printed.
comment_char : string, optional
The character which denotes that the remainder of the line is a
comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : SFrame
See Also
--------
read_csv_with_errors, SFrame
Examples
--------
Read a regular csv file, with all default options, automatically
determine types:
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Read only the first 100 lines of the csv file:
>>> sf = graphlab.SFrame.read_csv(url, nrows=100)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 100
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[100 rows x 3 columns]
Read all columns as str type
>>> sf = graphlab.SFrame.read_csv(url, column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Specify types for a subset of columns and leave the rest to be str.
>>> sf = graphlab.SFrame.read_csv(url,
... column_type_hints={
... 'user_id':int, 'rating':float
... })
>>> sf
Columns:
user_id str
movie_id str
rating float
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3.0 |
| 25907 | 1663 | 3.0 |
| 25923 | 1663 | 3.0 |
| 25924 | 1663 | 3.0 |
| 25928 | 1663 | 2.0 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Not treat first line as header:
>>> sf = graphlab.SFrame.read_csv(url, header=False)
>>> sf
Columns:
X1 str
X2 str
X3 str
Rows: 10001
+---------+----------+--------+
| X1 | X2 | X3 |
+---------+----------+--------+
| user_id | movie_id | rating |
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10001 rows x 3 columns]
Treat '3' as missing value:
>>> sf = graphlab.SFrame.read_csv(url, na_values=['3'], column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | None |
| 25907 | 1663 | None |
| 25923 | 1663 | None |
| 25924 | 1663 | None |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Throw error on parse failure:
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> sf = graphlab.SFrame.read_csv(bad_url, error_bad_lines=True)
RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c"
Set error_bad_lines=False to skip bad lines
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=error_bad_lines,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=False)[0]
def to_schema_rdd(self,sc,sql,number_of_partitions=4):
"""
Convert the current SFrame to the Spark SchemaRDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
sql : SQLContext
sql is an existing SQLContext.
number_of_partitions : int
number of partitions for the output rdd
Returns
----------
out: SchemaRDD
Examples
--------
>>> from pyspark import SparkContext, SQLContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sqlc = SQLContext(sc)
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_schema_rdd(sc, sqlc)
>>> rdd.collect()
[Row(x=1, y=u'fish'), Row(x=2, y=u'chips'), Row(x=3, y=u'salad')]
"""
def homogeneous_type(seq):
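            # Returns True when every element of ``seq`` has the same concrete
            # type; an empty or None ``seq`` is treated as homogeneous.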
if seq is None or len(seq) == 0:
return True
iseq = iter(seq)
first_type = type(next(iseq))
            return all(type(x) is first_type for x in iseq)
if len(self) == 0:
raise ValueError("SFrame is empty")
column_names = self.column_names()
first_row = self.head(1)[0]
for name in column_names:
if hasattr(first_row[name],'__iter__') and homogeneous_type(first_row[name]) is not True:
raise TypeError("Support for translation to Spark SchemaRDD not enabled for heterogeneous iterable type (column: %s). Use SFrame.to_rdd()." % name)
for _type in self.column_types():
if(_type.__name__ == 'datetime'):
raise TypeError("Support for translation to Spark SchemaRDD not enabled for datetime type. Use SFrame.to_rdd() ")
rdd = self.to_rdd(sc,number_of_partitions);
from pyspark.sql import Row
rowRdd = rdd.map(lambda x: Row(**x))
return sql.inferSchema(rowRdd)
def to_rdd(self, sc, number_of_partitions=4):
"""
Convert the current SFrame to the Spark RDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
number_of_partitions: int
number of partitions for the output rdd
Returns
----------
out: RDD
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_rdd(sc)
>>> rdd.collect()
[{'x': 1L, 'y': 'fish'}, {'x': 2L, 'y': 'chips'}, {'x': 3L, 'y': 'salad'}]
"""
_mt._get_metric_tracker().track('sframe.to_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
for _type in self.column_types():
if(_type.__name__ == 'Image'):
raise TypeError("Support for translation to Spark RDDs not enabled for Image type.")
if type(number_of_partitions) is not int:
raise ValueError("number_of_partitions parameter expects an integer type")
if number_of_partitions == 0:
raise ValueError("number_of_partitions can not be initialized to zero")
# Save SFrame in a temporary place
tmp_loc = self.__get_staging_dir__(sc)
sf_loc = os.path.join(tmp_loc, str(uuid.uuid4()))
self.save(sf_loc)
# Keep track of the temporary sframe that is saved(). We need to delete it eventually.
dummysf = load_sframe(sf_loc)
dummysf.__proxy__.delete_on_close()
SFRAME_GARBAGE_COLLECTOR.append(dummysf)
sframe_len = self.__len__()
small_partition_size = sframe_len/number_of_partitions
big_partition_size = small_partition_size + 1
num_big_partition_size = sframe_len % number_of_partitions
num_small_partition_size = number_of_partitions - num_big_partition_size
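        # Build one "start:end" row-range string per partition. The first
        # num_big_partition_size partitions receive one extra row so that all
        # sframe_len rows are covered even when the split is uneven.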
count = 0
start_index = 0
ranges = []
while(count < number_of_partitions):
if(count < num_big_partition_size):
ranges.append((str(start_index)+":"+str(start_index + big_partition_size)))
start_index = start_index + big_partition_size
else:
ranges.append((str(start_index)+":"+str(start_index + small_partition_size)))
start_index = start_index + small_partition_size
count+=1
from pyspark import RDD
rdd = sc.parallelize(ranges,number_of_partitions)
if sc.master[0:5] == 'local':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + \
" " + BINARY_PATHS['SFRAME_RDD_PATH'] + " " + sf_loc)
elif sc.master == 'yarn-client':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] + \
" " + "./" + SPARK_SUPPORT_NAMES['SFRAME_RDD_PATH'] + \
" " + sf_loc)
serializedRdd = sc._jvm.org.graphlab.create.GraphLabUtil.stringToByte(pipeRdd)
import pyspark
output_rdd = RDD(serializedRdd,sc,pyspark.serializers.PickleSerializer())
return output_rdd
@classmethod
def __get_staging_dir__(cls,cur_sc):
if not RDD_SUPPORT_INITED:
__rdd_support_init__(cur_sc)
return STAGING_DIR
@classmethod
def from_rdd(cls, rdd):
"""
Convert a Spark RDD into a GraphLab Create SFrame.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
rdd : pyspark.rdd.RDD
Returns
-------
out : SFrame
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> rdd = sc.parallelize([1,2,3])
>>> sf = SFrame.from_rdd(rdd)
>>> sf
Data:
+-----+
| X1 |
+-----+
| 1.0 |
| 2.0 |
| 3.0 |
+-----+
[3 rows x 1 columns]
"""
_mt._get_metric_tracker().track('sframe.from_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
checkRes = rdd.take(1);
if len(checkRes) > 0 and checkRes[0].__class__.__name__ == 'Row' and rdd.__class__.__name__ not in {'SchemaRDD','DataFrame'}:
raise Exception("Conversion from RDD(pyspark.sql.Row) to SFrame not supported. Please call inferSchema(RDD) first.")
if(rdd._jrdd_deserializer.__class__.__name__ == 'UTF8Deserializer'):
return SFrame.__from_UTF8Deserialized_rdd__(rdd)
sf_names = None
rdd_type = "rdd"
if rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}:
rdd_type = "schemardd"
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
else:
sf_names = first_row.__FIELDS__
sf_names = [str(i) for i in sf_names]
cur_sc = rdd.ctx
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
mode = "batch"
if(rdd._jrdd_deserializer.__class__.__name__ == 'PickleSerializer'):
mode = "pickle"
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " + \
BINARY_PATHS['RDD_SFRAME_PATH'] + " " + tmp_loc +\
" " + mode + " " + rdd_type)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" + SPARK_SUPPORT_NAMES['RDD_SFRAME_PATH'] + " " +\
tmp_loc + " " + mode + " " + rdd_type)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
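            # If column types differ between partitions, coerce the mismatched
            # columns to str on both sides before appending.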
out_sf_coltypes = out_sf.column_types()
if(len(out_sf_coltypes) != 0):
sf_coltypes = sf.column_types()
sf_temp_names = sf.column_names()
out_sf_temp_names = out_sf.column_names()
for i in range(len(sf_coltypes)):
if sf_coltypes[i] != out_sf_coltypes[i]:
print "mismatch for types %s and %s" % (sf_coltypes[i],out_sf_coltypes[i])
sf[sf_temp_names[i]] = sf[sf_temp_names[i]].astype(str)
out_sf[out_sf_temp_names[i]] = out_sf[out_sf_temp_names[i]].astype(str)
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def __from_UTF8Deserialized_rdd__(cls, rdd):
_mt._get_metric_tracker().track('sframe.__from_UTF8Deserialized_rdd__')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
cur_sc = rdd.ctx
sf_names = None
sf_types = None
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
if(rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}):
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
sf_types = [type(i) for i in first_row.values()]
else:
sf_names = first_row.__FIELDS__
sf_types = [type(i) for i in first_row]
sf_names = [str(i) for i in sf_names]
for _type in sf_types:
if(_type != int and _type != str and _type != float and _type != unicode):
raise TypeError("Only int, str, and float are supported for now")
types = ""
for i in sf_types:
types += i.__name__ + ","
if cur_sc.master[0:5] == 'local':
t = rdd._jschema_rdd.toJavaStringOfValues().pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " + tmp_loc +\
" " + types)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.toJavaStringOfValues(
rdd._jschema_rdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc + " " + types)
else:
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def from_odbc(cls, db, sql, verbose=False):
"""
Convert a table or query from a database to an SFrame.
This function does not do any checking on the given SQL query, and
cannot know what effect it will have on the database. Any side effects
from the query will be reflected on the database. If no result
rows are returned, an empty SFrame is created.
Keep in mind the default case your database stores table names in. In
some cases, you may need to add quotation marks (or whatever character
your database uses to quote identifiers), especially if you created the
table using `to_odbc`.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
sql : str
A SQL query. The query must be acceptable by the ODBC driver used by
`graphlab.extensions._odbc_connection.unity_odbc_connection`.
Returns
-------
out : SFrame
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
does not apply to the machine your database is running, which can (and
often will) be running on a separate machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> a_table = graphlab.SFrame.from_odbc(db, "SELECT * FROM a_table")
>>> join_result = graphlab.SFrame.from_odbc(db, 'SELECT * FROM "MyTable" a, "AnotherTable" b WHERE a.id=b.id')
"""
result = db.execute_query(sql)
if not isinstance(result, SFrame):
raise RuntimeError("Cannot create an SFrame for query. No result set.")
        return result
def to_odbc(self, db, table_name, append_if_exists=False, verbose=True):
"""
Convert an SFrame to a table in a database.
By default, searches for a table in the database with the given name.
If found, this will attempt to append all the rows of the SFrame to the
end of the table. If not, this will create a new table with the given
name. This behavior is toggled with the `append_if_exists` flag.
When creating a new table, GraphLab Create uses a heuristic approach to
pick a corresponding type for each column in the SFrame using the type
information supplied by the database's ODBC driver. Your driver must
support giving this type information for GraphLab Create to support
writing to the database.
To allow more expressive and accurate naming, `to_odbc` puts quotes
around each identifier (table names and column names). Depending on
your database, you may need to refer to the created table with quote
characters around the name. This character is not the same for all
databases, but '"' is the most common.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
table_name : str
The name of the table you would like to create/append to.
append_if_exists : bool
If True, this will attempt to append to the table named `table_name`
if it is found to exist in the database.
verbose : bool
Print progress updates on the insertion process.
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
"local machine" rule does not apply to the machine your database is
running on, which can (and often will) be running on a separate
machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> sf = graphlab.SFrame({'a':[1,2,3],'b':['hi','pika','bye']})
>>> sf.to_odbc(db, 'a_cool_table')
"""
if (not verbose):
glconnect.get_client().set_log_progress(False)
db._insert_sframe(self, table_name, append_if_exists)
if (not verbose):
glconnect.get_client().set_log_progress(True)
def __repr__(self):
"""
Returns a string description of the frame
"""
printed_sf = self._imagecols_to_stringcols()
ret = self.__get_column_description__()
if self.__has_size__():
ret = ret + "Rows: " + str(len(self)) + "\n\n"
else:
ret = ret + "Rows: Unknown" + "\n\n"
ret = ret + "Data:\n"
if (len(printed_sf.head()) > 0):
ret = ret + str(self)
else:
ret = ret + "\t[]"
return ret
def __get_column_description__(self):
colnames = self.column_names()
coltypes = self.column_types()
ret = "Columns:\n"
if len(colnames) > 0:
for i in range(len(colnames)):
ret = ret + "\t" + colnames[i] + "\t" + coltypes[i].__name__ + "\n"
ret = ret + "\n"
else:
ret = ret + "\tNone\n\n"
return ret
def __get_pretty_tables__(self, wrap_text=False, max_row_width=80,
max_column_width=30, max_columns=20,
max_rows_to_display=60):
"""
Returns a list of pretty print tables representing the current SFrame.
If the number of columns is larger than max_columns, the last pretty
table will contain an extra column of "...".
Parameters
----------
wrap_text : bool, optional
max_row_width : int, optional
Max number of characters per table.
max_column_width : int, optional
Max number of characters per column.
max_columns : int, optional
Max number of columns per table.
max_rows_to_display : int, optional
Max number of rows to display.
Returns
-------
out : list[PrettyTable]
"""
headsf = self.head(max_rows_to_display)
if headsf.shape == (0, 0):
return [PrettyTable()]
# convert array.array column to list column so they print like [...]
# and not array('d', ...)
for col in headsf.column_names():
if headsf[col].dtype() is array.array:
headsf[col] = headsf[col].astype(list)
def _value_to_str(value):
if (type(value) is array.array):
return str(list(value))
elif (type(value) is list):
return '[' + ", ".join(_value_to_str(x) for x in value) + ']'
else:
return str(value)
def _escape_space(s):
return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])
def _truncate_respect_unicode(s, max_length):
if (len(s) <= max_length):
return s
else:
u = unicode(s, 'utf-8', errors='replace')
return u[:max_length].encode('utf-8')
def _truncate_str(s, wrap_str=False):
"""
            Truncate and optionally wrap the input string as unicode, replacing
            unconvertible characters with a replacement character (?).
"""
s = _escape_space(s)
if len(s) <= max_column_width:
return unicode(s, 'utf-8', errors='replace')
else:
ret = ''
# if wrap_str is true, wrap the text and take at most 2 rows
if wrap_str:
wrapped_lines = wrap(s, max_column_width)
if len(wrapped_lines) == 1:
return wrapped_lines[0]
last_line = wrapped_lines[1]
if len(last_line) >= max_column_width:
last_line = _truncate_respect_unicode(last_line, max_column_width - 4)
ret = wrapped_lines[0] + '\n' + last_line + ' ...'
else:
ret = _truncate_respect_unicode(s, max_column_width - 4) + '...'
return unicode(ret, 'utf-8', errors='replace')
columns = self.column_names()[:max_columns]
columns.reverse() # reverse the order of columns and we will pop from the end
num_column_of_last_table = 0
row_of_tables = []
        # build a list of tables, each with at most max_columns columns;
        # every table must respect max_row_width and max_column_width
while len(columns) > 0:
tbl = PrettyTable()
table_width = 0
num_column_of_last_table = 0
while len(columns) > 0:
col = columns.pop()
# check the max length of element in the column
if len(headsf) > 0:
col_width = min(max_column_width, max(len(str(x)) for x in headsf[col]))
else:
col_width = max_column_width
if (table_width + col_width < max_row_width):
# truncate the header if necessary
header = _truncate_str(col, wrap_text)
tbl.add_column(header, [_truncate_str(_value_to_str(x), wrap_text) for x in headsf[col]])
table_width = str(tbl).find('\n')
num_column_of_last_table += 1
else:
# the column does not fit in the current table, push it back to columns
columns.append(col)
break
tbl.align = 'c'
row_of_tables.append(tbl)
# add a column of all "..." if there are more columns than displayed
if self.num_cols() > max_columns:
row_of_tables[-1].add_column('...', ['...'] * len(headsf))
num_column_of_last_table += 1
# add a row of all "..." if there are more rows than displayed
if self.__has_size__() and self.num_rows() > headsf.num_rows():
row_of_tables[-1].add_row(['...'] * num_column_of_last_table)
return row_of_tables
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
max_row_width=80):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
See Also
--------
head, tail
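
        Examples
        --------
        A minimal sketch, assuming a hypothetical two-column SFrame:

        >>> sf = graphlab.SFrame({'id': range(100), 'val': range(100)})
        >>> sf.print_rows(num_rows=5, num_columns=2)  # prints the first 5 rows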
"""
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width)
footer = "[%d rows x %d columns]\n" % self.shape
print '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _imagecols_to_stringcols(self, num_rows=10):
# A list of column types
types = self.column_types()
# A list of indexable column names
names = self.column_names()
# Constructing names of sframe columns that are of image type
image_column_names = [names[i] for i in range(len(names)) if types[i] == graphlab.Image]
        # If there are image-type columns, copy the SFrame and cast the first
        # num_rows entries of those columns to string
if len(image_column_names) > 0:
printed_sf = SFrame()
for t in names:
if t in image_column_names:
printed_sf[t] = self[t]._head_str(num_rows)
else:
printed_sf[t] = self[t].head(num_rows)
else:
printed_sf = self
return printed_sf
def __str__(self, num_rows=10, footer=True):
"""
Returns a string containing the first 10 elements of the frame, along
with a description of the frame.
"""
MAX_ROWS_TO_DISPLAY = num_rows
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if (not footer):
return '\n'.join([str(tb) for tb in row_of_tables])
if self.__has_size__():
footer = '[%d rows x %d columns]\n' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '\n'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]\n' % self.num_columns()
footer += '\n'.join(LAZY_FOOTER_STRS)
return '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _repr_html_(self):
MAX_ROWS_TO_DISPLAY = 10
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=True, max_row_width=120, max_columns=40, max_column_width=25, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if self.__has_size__():
footer = '[%d rows x %d columns]<br/>' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '<br/>'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]<br/>' % self.num_columns()
footer += '<br/>'.join(LAZY_FOOTER_STRS)
begin = '<div style="max-height:1000px;max-width:1500px;overflow:auto;">'
end = '\n</div>'
return begin + '\n'.join([tb.get_html_string(format=True) for tb in row_of_tables]) + "\n" + footer + end
def __nonzero__(self):
"""
Returns true if the frame is not empty.
"""
return self.num_rows() != 0
def __len__(self):
"""
Returns the number of rows of the sframe.
"""
return self.num_rows()
def __copy__(self):
"""
Returns a shallow copy of the sframe.
"""
return self.select_columns(self.column_names())
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
def _row_selector(self, other):
"""
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
"""
if type(other) is SArray:
if len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
def dtype(self):
"""
The type of each column.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
column_types
"""
return self.column_types()
def num_rows(self):
"""
The number of rows in this SFrame.
Returns
-------
out : int
Number of rows in the SFrame.
See Also
--------
num_columns
"""
return self.__proxy__.num_rows()
def num_cols(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_columns, num_rows
"""
return self.__proxy__.num_columns()
def num_columns(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_cols, num_rows
"""
return self.__proxy__.num_columns()
def column_names(self):
"""
The name of each column in the SFrame.
Returns
-------
out : list[string]
Column names of the SFrame.
See Also
--------
rename
"""
return self.__proxy__.column_names()
def column_types(self):
"""
The type of each column in the SFrame.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
dtype
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
The first n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the first n rows of the current SFrame
See Also
--------
tail, print_rows
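
        Examples
        --------
        For illustration, assuming a hypothetical three-row SFrame:

        >>> sf = graphlab.SFrame({'id': [1, 2, 3]})
        >>> first_two = sf.head(2)  # new SFrame containing rows 0 and 1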
"""
return SFrame(_proxy=self.__proxy__.head(n))
def to_dataframe(self):
"""
Convert this SFrame to pandas.DataFrame.
This operation will construct a pandas.DataFrame in memory. Care must
be taken when size of the returned object is big.
Returns
-------
out : pandas.DataFrame
The dataframe which contains all rows of SFrame
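
        Examples
        --------
        A minimal sketch, assuming pandas is installed and using a hypothetical
        two-row SFrame:

        >>> sf = graphlab.SFrame({'id': [1, 2], 'val': ['A', 'B']})
        >>> df = sf.to_dataframe()  # pandas.DataFrame with columns 'id' and 'val'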
"""
assert HAS_PANDAS
df = pandas.DataFrame()
for i in range(self.num_columns()):
column_name = self.column_names()[i]
df[column_name] = list(self[column_name])
if len(df[column_name]) == 0:
df[column_name] = df[column_name].astype(self.column_types()[i])
return df
def tail(self, n=10):
"""
The last n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the last n rows of the current SFrame
See Also
--------
head, print_rows
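
        Examples
        --------
        For illustration, assuming a hypothetical three-row SFrame:

        >>> sf = graphlab.SFrame({'id': [1, 2, 3]})
        >>> last_two = sf.tail(2)  # new SFrame containing rows 1 and 2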
"""
return SFrame(_proxy=self.__proxy__.tail(n))
def apply(self, fn, dtype=None, seed=None):
"""
Transform each row to an :class:`~graphlab.SArray` according to a
specified function. Returns a new SArray of ``dtype`` where each element
in this SArray is transformed by `fn(x)` where `x` is a single row in
the sframe represented as a dictionary. The ``fn`` should return
exactly one value which can be cast into type ``dtype``. If ``dtype`` is
        not specified, the first 10 rows of the SFrame are used to make a guess
        of the target data type.
Parameters
----------
fn : function
The function to transform each row of the SFrame. The return
type should be convertible to `dtype` if `dtype` is not None.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : dtype, optional
            The dtype of the new SArray. If None, the first 10
            elements of the array are used to guess the target
data type.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SArray
The SArray transformed by fn. Each element of the SArray is of
type ``dtype``
Examples
--------
Concatenate strings from several columns:
>>> sf = graphlab.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6],
'rating': [4, 5, 1]})
>>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating']))
dtype: str
Rows: 3
['134', '235', '361']
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
double mean(const std::map<flexible_type, flexible_type>& dict) {
double sum = 0.0;
for (const auto& kv: dict) sum += (double)kv.second;
return sum / dict.size();
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(mean, "row");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sf = graphlab.SFrame({'x0': [1, 2, 3], 'x1': [2, 3, 1],
... 'x2': [3, 1, 2]})
>>> sf.apply(example.mean)
dtype: float
Rows: 3
[2.0,2.0,2.0]
"""
assert _is_callable(fn), "Input must be a function"
test_sf = self[:10]
dryrun = [fn(row) for row in test_sf]
if dtype is None:
dtype = SArray(dryrun).dtype()
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.apply')
nativefn = None
try:
import graphlab.extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed))
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
def flat_map(self, column_names, fn, column_types='auto', seed=None):
"""
Map each row of the SFrame to multiple rows in a new SFrame via a
function.
The output of `fn` must have type List[List[...]]. Each inner list
will be a single row in the new output, and the collection of these
rows within the outer list make up the data for the output SFrame.
All rows must have the same length and the same order of types to
make sure the result columns are homogeneously typed. For example, if
        the first element emitted into the outer list by `fn` is
[43, 2.3, 'string'], then all other elements emitted into the outer
list must be a list with three elements, where the first is an int,
second is a float, and third is a string. If column_types is not
specified, the first 10 rows of the SFrame are used to determine the
column types of the returned sframe.
Parameters
----------
column_names : list[str]
The column names for the returned SFrame.
fn : function
            The function that maps each sframe row into multiple rows,
            returning List[List[...]]. All output rows must have the same
length and order of types.
column_types : list[type], optional
The column types of the output SFrame. Default value will be
automatically inferred by running `fn` on the first 10 rows of the
input. If the types cannot be inferred from the first 10 rows, an
error is raised.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SFrame
A new SFrame containing the results of the flat_map of the
original SFrame.
Examples
---------
Repeat each row according to the value in the 'number' column.
>>> sf = graphlab.SFrame({'letter': ['a', 'b', 'c'],
... 'number': [1, 2, 3]})
>>> sf.flat_map(['number', 'letter'],
... lambda x: [list(x.itervalues()) for i in range(0, x['number'])])
+--------+--------+
| number | letter |
+--------+--------+
| 1 | a |
| 2 | b |
| 2 | b |
| 3 | c |
| 3 | c |
| 3 | c |
+--------+--------+
[6 rows x 2 columns]
"""
assert inspect.isfunction(fn), "Input must be a function"
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.flat_map')
# determine the column_types
if column_types == 'auto':
types = set()
sample = self[0:10]
results = [fn(row) for row in sample]
for rows in results:
if type(rows) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
# note: this skips empty lists
for row in rows:
if type(row) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
types.add(tuple([type(v) for v in row]))
if len(types) == 0:
                raise TypeError("Could not infer output column types from the "
                                "first ten rows of the SFrame. Please use the "
                                "'column_types' parameter to set the types.")
if len(types) > 1:
raise TypeError("Mapped rows must have the same length and types")
column_types = list(types.pop())
assert type(column_types) is list
assert len(column_types) == len(column_names), "Number of output columns must match the size of column names"
with cython_context():
return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed))
def sample(self, fraction, seed=None):
"""
Sample the current SFrame's rows.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch. Must be between 0 and 1.
The number of rows returned is approximately the fraction times the
number of rows.
seed : int, optional
Seed for the random number generator used to sample.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
"""
if not seed:
seed = int(time.time())
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
_mt._get_metric_tracker().track('sframe.sample')
if (self.num_rows() == 0 or self.num_cols() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed))
def random_split(self, fraction, seed=None):
"""
Randomly split the rows of an SFrame into two SFrames. The first SFrame
contains *M* rows, sampled uniformly (without replacement) from the
original SFrame. *M* is approximately the fraction times the original
number of rows. The second SFrame contains the remaining rows of the
original SFrame.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch for the first returned
SFrame. Must be between 0 and 1.
seed : int, optional
Seed for the random number generator used to split.
Returns
-------
out : tuple [SFrame]
Two new SFrames.
Examples
--------
Suppose we have an SFrame with 1,024 rows and we want to randomly split
it into training and testing datasets with about a 90%/10% split.
>>> sf = graphlab.SFrame({'id': range(1024)})
>>> sf_train, sf_test = sf.random_split(.9, seed=5)
>>> print len(sf_train), len(sf_test)
922 102
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_cols() == 0):
return (SFrame(), SFrame())
if not seed:
seed = int(time.time())
# The server side requires this to be an int, so cast if we can
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
_mt._get_metric_tracker().track('sframe.random_split')
with cython_context():
proxy_pair = self.__proxy__.random_split(fraction, seed)
return (SFrame(data=[], _proxy=proxy_pair[0]), SFrame(data=[], _proxy=proxy_pair[1]))
def topk(self, column_name, k=10, reverse=False):
"""
Get the top k rows according to the given column. The result is sorted by
`column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = graphlab.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
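To get the k smallest values instead, pass ``reverse=True``; the result is
then sorted in ascending order. Illustrative:
>>> sf.topk('id', k=3, reverse=True)  # returns ids 0, 1 and 2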
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
_mt._get_metric_tracker().track('sframe.topk')
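# topk_index flags the rows belonging to the top k; select those rows,
# then sort them by the requested column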
sf = self[self[column_name].topk_index(k, reverse)]
return sf.sort(column_name, ascending=reverse)
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, will try to infer the format from filename given. If file
name ends with 'csv' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
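The '.csv.gz' suffix is also recognized as the csv format (illustrative):
>>> sf.save('data/training_data.csv.gz')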
"""
_mt._get_metric_tracker().track('sframe.save', properties={'format':format})
if format == None:
if filename.endswith(('.csv', '.csv.gz')):
format = 'csv'
else:
format = 'binary'
else:
if format == 'csv':
if not filename.endswith(('.csv', '.csv.gz')):
filename = filename + '.csv'
elif format != 'binary':
raise ValueError("Invalid format: {}. Supported formats are 'csv' and 'binary'".format(format))
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
if format == 'binary':
self.__proxy__.save(url)
elif format == 'csv':
assert filename.endswith(('.csv', '.csv.gz'))
self.__proxy__.save_as_csv(url, {})
else:
raise ValueError("Unsupported format: {}".format(format))
def select_column(self, key):
"""
Get a reference to the :class:`~graphlab.SArray` that corresponds with
the given key. Throws an exception if the key is something other than a
string or if the key is not found.
Parameters
----------
key : str
The column name.
Returns
-------
out : SArray
The SArray that is referred by ``key``.
See Also
--------
select_columns
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie']})
>>> # This line is equivalent to `sa = sf['user_name']`
>>> sa = sf.select_column('user_name')
>>> sa
dtype: str
Rows: 3
['alice', 'bob', 'charlie']
"""
if not isinstance(key, str):
raise TypeError("Invalid key type: must be str")
with cython_context():
return SArray(data=[], _proxy=self.__proxy__.select_column(key))
def select_columns(self, keylist):
"""
Get SFrame composed only of the columns referred to in the given list of
keys. Throws an exception if ANY of the keys are not in this SFrame or
if ``keylist`` is anything other than a list of strings.
Parameters
----------
keylist : list[str]
The list of column names.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``keylist`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not hasattr(keylist, '__iter__'):
raise TypeError("keylist must be an iterable")
if not all([isinstance(x, str) for x in keylist]):
raise TypeError("Invalid key type: must be str")
key_set = set(keylist)
if (len(key_set)) != len(keylist):
for key in key_set:
if keylist.count(key) > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(keylist))
def add_column(self, data, name=""):
"""
Add a column to this SFrame. The number of elements in the data given
must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self. If no
name is given, a default name is chosen.
Parameters
----------
data : SArray
The 'column' of data to add.
name : string, optional
The name of the column. If no name is given, a default name is
chosen.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = graphlab.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> sf.add_column(sa, name='species')
>>> sf
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
with cython_context():
self.__proxy__.add_column(data.__proxy__, name)
return self
def add_columns(self, data, namelist=None):
"""
Adds multiple columns to this SFrame. The number of elements in all
columns must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
namelist : list of string, optional
A list of column names. All names must be specified. ``namelist`` is
ignored if data is an SFrame.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_column
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf2 = graphlab.SFrame({'species': ['cat', 'dog', 'fossa'],
... 'age': [3, 5, 9]})
>>> sf.add_columns(sf2)
>>> sf
+----+-----+-----+---------+
| id | val | age | species |
+----+-----+-----+---------+
| 1 | A | 3 | cat |
| 2 | B | 5 | dog |
| 3 | C | 9 | fossa |
+----+-----+-----+---------+
[3 rows x 4 columns]
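Columns can also be supplied as a list of SArrays together with their names.
Illustrative sketch ('heights' is an assumed new column name):
>>> sf.add_columns([graphlab.SArray([0.2, 0.5, 0.9])], namelist=['heights'])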
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
namelist = other.column_names()
my_columns = set(self.column_names())
for name in namelist:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not hasattr(datalist, '__iter__'):
raise TypeError("datalist must be an iterable")
if not hasattr(namelist, '__iter__'):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list : must all be str")
with cython_context():
self.__proxy__.add_columns([x.__proxy__ for x in datalist], namelist)
return self
def remove_column(self, name):
"""
Remove a column from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
name : string
The name of the column to remove.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> sf.remove_column('val')
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
colid = self.column_names().index(name)
with cython_context():
self.__proxy__.remove_column(colid)
return self
def remove_columns(self, column_names):
"""
Remove one or more columns from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
column_names : list or iterable
A list or iterable of column names.
Returns
-------
out : SFrame
The SFrame with given columns removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]})
>>> sf.remove_columns(['val1', 'val2'])
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError('Cannot find column %s' % name)
# Delete it going backwards so we don't invalidate indices
deletion_indices = sorted(existing_columns[name] for name in column_names)
for colid in reversed(deletion_indices):
with cython_context():
self.__proxy__.remove_column(colid)
return self
def swap_columns(self, column_1, column_2):
"""
Swap the columns with the given names. This operation modifies the
current SFrame in place and returns self.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
Returns
-------
out : SFrame
The SFrame with swapped columns.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.swap_columns('id', 'val')
>>> sf
+-----+-----+
| val | id |
+-----+-----+
| A | 1 |
| B | 2 |
| C | 3 |
+-----+-----+
[3 rows x 2 columns]
"""
colnames = self.column_names()
colid_1 = colnames.index(column_1)
colid_2 = colnames.index(column_2)
with cython_context():
self.__proxy__.swap_columns(colid_1, colid_2)
return self
def rename(self, names):
"""
Rename the given columns. ``names`` is expected to be a dict specifying
the old and new names. This changes the names of the columns given as
the keys and replaces them with the names given as the values. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
names : dict [string, string]
Dictionary of [old_name, new_name]
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
column_names
Examples
--------
>>> sf = SFrame({'X1': ['Alice','Bob'],
... 'X2': ['123 Fake Street','456 Fake Street']})
>>> sf.rename({'X1': 'name', 'X2':'address'})
>>> sf
+-------+-----------------+
| name | address |
+-------+-----------------+
| Alice | 123 Fake Street |
| Bob | 456 Fake Street |
+-------+-----------------+
[2 rows x 2 columns]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
all_columns = set(self.column_names())
for k in names:
if not k in all_columns:
raise ValueError('Cannot find column %s in the SFrame' % k)
with cython_context():
for k in names:
colid = self.column_names().index(k)
self.__proxy__.set_column_name(colid, names[k])
return self
def __getitem__(self, key):
"""
This method does things based on the type of `key`.
If `key` is:
* str
Calls `select_column` on `key`
* list
Calls `select_columns` on the column names in `key`
* SArray
Performs a logical filter. Expects given SArray to be the same
length as all columns in current SFrame. Every row
corresponding with an entry in the given SArray that is
equivalent to False is filtered from the result.
* int
Returns a single row of the SFrame (the `key`th one) as a dictionary.
* slice
Returns an SFrame including only the sliced rows.
"""
if type(key) is SArray:
return self._row_selector(key)
elif type(key) is list:
return self.select_columns(key)
elif type(key) is str:
return self.select_column(key)
elif type(key) is int:
if key < 0:
key = len(self) + key
if key >= len(self):
raise IndexError("SFrame index out of range")
return list(SFrame(_proxy = self.__proxy__.copy_range(key, 1, key+1)))[0]
elif type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
# handle negative indices
if start < 0:
start = len(self) + start
if stop < 0:
stop = len(self) + stop
return SFrame(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise TypeError("Invalid index type: must be SArray, list, or str")
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
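For example (illustrative, assuming ``sf`` has three rows):
>>> sf['ones'] = 1                   # constant column
>>> sf['letter'] = ['a', 'b', 'c']   # list is wrapped into an SArray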
"""
if type(key) is list:
self.add_columns(value, key)
elif type(key) is str:
sa_value = None
if (type(value) is SArray):
sa_value = value
elif hasattr(value, '__iter__'): # wrap list, array... to sarray
sa_value = SArray(value)
else: # create an sarray of constant value
sa_value = SArray.from_const(value, self.num_rows())
# set new column
if not key in self.column_names():
with cython_context():
self.add_column(sa_value, key)
else:
# special case if replacing the only column.
# server would fail the replacement if the new column has different
# length than current one, which doesn't make sense if we are replacing
# the only column. To support this, we first take out the only column
# and then put it back if exception happens
single_column = (self.num_cols() == 1)
if (single_column):
tmpname = key
saved_column = self.select_column(key)
self.remove_column(key)
else:
# add the column to a unique column name.
tmpname = '__' + '-'.join(self.column_names())
try:
self.add_column(sa_value, tmpname)
except Exception as e:
if (single_column):
self.add_column(saved_column, key)
raise
if (not single_column):
# if add succeeded, remove the column name and rename tmpname->columnname.
self.swap_columns(key, tmpname)
self.remove_column(key)
self.rename({tmpname: key})
else:
raise TypeError('Cannot set column with key type ' + str(type(key)))
def __delitem__(self, key):
"""
Wrapper around remove_column.
"""
self.remove_column(key)
def __materialize__(self):
"""
For an SFrame that is lazily evaluated, force the persistence of the
SFrame to disk, committing all lazy evaluated operations.
"""
with cython_context():
self.__proxy__.materialize()
def __is_materialized__(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__proxy__.is_materialized()
def __has_size__(self):
"""
Returns whether or not the size of the SFrame is known.
"""
return self.__proxy__.has_size()
def __iter__(self):
"""
Provides an iterator to the rows of the SFrame.
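For example (illustrative, assuming an 'id' column):
>>> for row in sf:
...     print row['id']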
"""
_mt._get_metric_tracker().track('sframe.__iter__')
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
column_names = self.column_names()
while(True):
for j in ret:
yield dict(zip(column_names, j))
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = graphlab.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.append')
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow the column name order of the two SFrames to differ, so we create
# a new SFrame from the "other" SFrame so that it has exactly the same shape
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name)
# check column type
if my_column_types[i] != other_column.dtype():
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype()))
with cython_context():
processed_other_frame.__materialize__()
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__))
def groupby(self, key_columns, operations, *args):
"""
Perform a group on the key_columns followed by aggregations on the
columns listed in operations.
The operations parameter is a dictionary that indicates which
aggregation operators to use and which columns to use them on. The
available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
VAR. See :mod:`~graphlab.aggregate` for more detail on the aggregators.
Parameters
----------
key_columns : string | list[string]
Column(s) to group by. Key columns can be of any type other than
dictionary.
operations : dict, list
Dictionary of columns and aggregation operations. Each key is a
output column name and each value is an aggregator. This can also
be a list of aggregators, in which case column names will be
automatically assigned.
*args
All other remaining arguments will be interpreted in the same
way as the operations argument.
Returns
-------
out_sf : SFrame
A new SFrame, with a column for each groupby column and each
aggregation operation.
See Also
--------
aggregate
Examples
--------
Suppose we have an SFrame with movie ratings by many users.
>>> import graphlab.aggregate as agg
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| 25933 | 1663 | 4 |
| 25934 | 1663 | 4 |
| 25935 | 1663 | 4 |
| 25936 | 1663 | 5 |
| 25937 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Compute the number of occurrences of each user.
>>> user_count = sf.groupby(key_columns='user_id',
... operations={'count': agg.COUNT()})
>>> user_count
+---------+-------+
| user_id | count |
+---------+-------+
| 62361 | 1 |
| 30727 | 1 |
| 40111 | 1 |
| 50513 | 1 |
| 35140 | 1 |
| 42352 | 1 |
| 29667 | 1 |
| 46242 | 1 |
| 58310 | 1 |
| 64614 | 1 |
| ... | ... |
+---------+-------+
[9852 rows x 2 columns]
Compute the mean and standard deviation of ratings per user.
>>> user_rating_stats = sf.groupby(key_columns='user_id',
... operations={
... 'mean_rating': agg.MEAN('rating'),
... 'std_rating': agg.STD('rating')
... })
>>> user_rating_stats
+---------+-------------+------------+
| user_id | mean_rating | std_rating |
+---------+-------------+------------+
| 62361 | 5.0 | 0.0 |
| 30727 | 4.0 | 0.0 |
| 40111 | 2.0 | 0.0 |
| 50513 | 4.0 | 0.0 |
| 35140 | 4.0 | 0.0 |
| 42352 | 5.0 | 0.0 |
| 29667 | 4.0 | 0.0 |
| 46242 | 5.0 | 0.0 |
| 58310 | 2.0 | 0.0 |
| 64614 | 2.0 | 0.0 |
| ... | ... | ... |
+---------+-------------+------------+
[9852 rows x 3 columns]
Compute the movie with the minimum rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={
... 'worst_movies': agg.ARGMIN('rating','movie_id')
... })
>>> chosen_movies
+---------+-------------+
| user_id | worst_movies |
+---------+-------------+
| 62361 | 1663 |
| 30727 | 1663 |
| 40111 | 1663 |
| 50513 | 1663 |
| 35140 | 1663 |
| 42352 | 1663 |
| 29667 | 1663 |
| 46242 | 1663 |
| 58310 | 1663 |
| 64614 | 1663 |
| ... | ... |
+---------+-------------+
[9852 rows x 2 columns]
Compute the movie with the max rating per user and also the movie with
the maximum imdb-ranking per user.
>>> sf['imdb-ranking'] = sf['rating'] * 10
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
>>> chosen_movies
+---------+------------------+------------------------+
| user_id | max_rating_movie | max_imdb_ranking_movie |
+---------+------------------+------------------------+
| 62361 | 1663 | 16630 |
| 30727 | 1663 | 16630 |
| 40111 | 1663 | 16630 |
| 50513 | 1663 | 16630 |
| 35140 | 1663 | 16630 |
| 42352 | 1663 | 16630 |
| 29667 | 1663 | 16630 |
| 46242 | 1663 | 16630 |
| 58310 | 1663 | 16630 |
| 64614 | 1663 | 16630 |
| ... | ... | ... |
+---------+------------------+------------------------+
[9852 rows x 3 columns]
Compute the movie with the max rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={'best_movies': agg.ARGMAX('rating','movie_id')})
Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
Compute the count, mean, and standard deviation of ratings per (user,
time), automatically assigning output column names.
>>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
>>> user_rating_stats = sf.groupby(['user_id', 'time'],
... [agg.COUNT(),
... agg.AVG('rating'),
... agg.STDV('rating')])
>>> user_rating_stats
+------+---------+-------+---------------+----------------+
| time | user_id | Count | Avg of rating | Stdv of rating |
+------+---------+-------+---------------+----------------+
| 2006 | 61285 | 1 | 4.0 | 0.0 |
| 2000 | 36078 | 1 | 4.0 | 0.0 |
| 2003 | 47158 | 1 | 3.0 | 0.0 |
| 2007 | 34446 | 1 | 3.0 | 0.0 |
| 2010 | 47990 | 1 | 3.0 | 0.0 |
| 2003 | 42120 | 1 | 5.0 | 0.0 |
| 2007 | 44940 | 1 | 4.0 | 0.0 |
| 2008 | 58240 | 1 | 4.0 | 0.0 |
| 2002 | 102 | 1 | 1.0 | 0.0 |
| 2009 | 52708 | 1 | 3.0 | 0.0 |
| ... | ... | ... | ... | ... |
+------+---------+-------+---------------+----------------+
[10000 rows x 5 columns]
The groupby function can take a variable length list of aggregation
specifiers so if we want the count and the 0.25 and 0.75 quantiles of
ratings:
>>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
>>> user_rating_stats
+------+---------+-------+------------------------+
| time | user_id | Count | rating_quantiles |
+------+---------+-------+------------------------+
| 2006 | 61285 | 1 | array('d', [4.0, 4.0]) |
| 2000 | 36078 | 1 | array('d', [4.0, 4.0]) |
| 2003 | 47158 | 1 | array('d', [3.0, 3.0]) |
| 2007 | 34446 | 1 | array('d', [3.0, 3.0]) |
| 2010 | 47990 | 1 | array('d', [3.0, 3.0]) |
| 2003 | 42120 | 1 | array('d', [5.0, 5.0]) |
| 2007 | 44940 | 1 | array('d', [4.0, 4.0]) |
| 2008 | 58240 | 1 | array('d', [4.0, 4.0]) |
| 2002 | 102 | 1 | array('d', [1.0, 1.0]) |
| 2009 | 52708 | 1 | array('d', [3.0, 3.0]) |
| ... | ... | ... | ... |
+------+---------+-------+------------------------+
[10000 rows x 4 columns]
To put all items a user rated into one list value by their star rating:
>>> user_rating_stats = sf.groupby(["user_id", "rating"],
... {"rated_movie_ids":agg.CONCAT("movie_id")})
>>> user_rating_stats
+--------+---------+----------------------+
| rating | user_id | rated_movie_ids |
+--------+---------+----------------------+
| 3 | 31434 | array('d', [1663.0]) |
| 5 | 25944 | array('d', [1663.0]) |
| 4 | 38827 | array('d', [1663.0]) |
| 4 | 51437 | array('d', [1663.0]) |
| 4 | 42549 | array('d', [1663.0]) |
| 4 | 49532 | array('d', [1663.0]) |
| 3 | 26124 | array('d', [1663.0]) |
| 4 | 46336 | array('d', [1663.0]) |
| 4 | 52133 | array('d', [1663.0]) |
| 5 | 62361 | array('d', [1663.0]) |
| ... | ... | ... |
+--------+---------+----------------------+
[9952 rows x 3 columns]
To put all items and rating of a given user together into a dictionary
value:
>>> user_rating_stats = sf.groupby("user_id",
... {"movie_rating":agg.CONCAT("movie_id", "rating")})
>>> user_rating_stats
+---------+--------------+
| user_id | movie_rating |
+---------+--------------+
| 62361 | {1663: 5} |
| 30727 | {1663: 4} |
| 40111 | {1663: 2} |
| 50513 | {1663: 4} |
| 35140 | {1663: 4} |
| 42352 | {1663: 5} |
| 29667 | {1663: 4} |
| 46242 | {1663: 5} |
| 58310 | {1663: 2} |
| 64614 | {1663: 2} |
| ... | ... |
+---------+--------------+
[9852 rows x 2 columns]
"""
# some basic checking first
# make sure key_columns is a list
if isinstance(key_columns, str):
key_columns = [key_columns]
# check that every column is a string, and is a valid column name
my_column_names = self.column_names()
key_columns_array = []
for column in key_columns:
if not isinstance(column, str):
raise TypeError("Column name must be a string")
if column not in my_column_names:
raise KeyError("Column " + column + " does not exist in SFrame")
if self[column].dtype() == dict:
raise TypeError("Cannot group on a dictionary column.")
key_columns_array.append(column)
group_output_columns = []
group_columns = []
group_ops = []
all_ops = [operations] + list(args)
for op_entry in all_ops:
# if it is not a dict, nor a list, it is just a single aggregator
# element (probably COUNT). wrap it in a list so we can reuse the
# list processing code
operation = op_entry
if not(isinstance(operation, list) or isinstance(operation, dict)):
operation = [operation]
if isinstance(operation, dict):
# now sweep the dict and add to group_columns and group_ops
for key in operation:
val = operation[key]
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for (col,output) in zip(column[0],key):
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [output]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [key]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [key]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition of output column: " + key)
elif isinstance(operation, list):
# we will be using automatically defined column names
for val in operation:
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for col in column[0]:
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [""]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition.")
# let's validate group_columns and group_ops are valid
for (cols, op) in zip(group_columns, group_ops):
for col in cols:
if not isinstance(col, str):
raise TypeError("Column name must be a string")
if not isinstance(op, str):
raise TypeError("Operation type not recognized.")
if op != graphlab.aggregate.COUNT()[0]:
for col in cols:
if col not in my_column_names:
raise KeyError("Column " + col + " does not exist in SFrame")
_mt._get_metric_tracker().track('sframe.groupby', properties={'operator':op})
with cython_context():
return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns,
group_output_columns, group_ops))
def join(self, right, on=None, how='inner'):
"""
Merge two SFrames. Merges the current (left) SFrame with the given
(right) SFrame using a SQL-style equi-join operation by columns.
Parameters
----------
right : SFrame
The SFrame to join.
on : None | str | list | dict, optional
The column name(s) representing the set of join keys. Each row that
has the same value in this set of columns will be merged together.
* If 'None' is given, join will use all columns that have the same
name as the set of join keys.
* If a str is given, this is interpreted as a join using one column,
where both SFrames have the same column name.
* If a list is given, this is interpreted as a join using one or
more column names, where each column name given exists in both
SFrames.
* If a dict is given, each dict key is taken as a column name in the
left SFrame, and each dict value is taken as the column name in
right SFrame that will be joined together. e.g.
{'left_col_name':'right_col_name'}.
how : {'left', 'right', 'outer', 'inner'}, optional
The type of join to perform. 'inner' is default.
* inner: Equivalent to a SQL inner join. Result consists of the
rows from the two frames whose join key values match exactly,
merged together into one SFrame.
* left: Equivalent to a SQL left outer join. Result is the union
between the result of an inner join and the rest of the rows from
the left SFrame, merged with missing values.
* right: Equivalent to a SQL right outer join. Result is the union
between the result of an inner join and the rest of the rows from
the right SFrame, merged with missing values.
* outer: Equivalent to a SQL full outer join. Result is
the union between the result of a left outer join and a right
outer join.
Returns
-------
out : SFrame
Examples
--------
>>> animals = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'name': ['dog', 'cat', 'sheep', 'cow']})
>>> sounds = graphlab.SFrame({'id': [1, 3, 4, 5],
... 'sound': ['woof', 'baa', 'moo', 'oink']})
>>> animals.join(sounds, how='inner')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
+----+-------+-------+
[3 rows x 3 columns]
>>> animals.join(sounds, on='id', how='left')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 2 | cat | None |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on=['id'], how='right')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on={'id':'id'}, how='outer')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
| 2 | cat | None |
+----+-------+-------+
[5 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.join', properties={'type':how})
available_join_types = ['left','right','outer','inner']
if not isinstance(right, SFrame):
raise TypeError("Can only join two SFrames")
if how not in available_join_types:
raise ValueError("Invalid join type")
join_keys = dict()
if on is None:
left_names = self.column_names()
right_names = right.column_names()
common_columns = [name for name in left_names if name in right_names]
for name in common_columns:
join_keys[name] = name
elif type(on) is str:
join_keys[on] = on
elif type(on) is list:
for name in on:
if type(name) is not str:
raise TypeError("Join keys must each be a str.")
join_keys[name] = name
elif type(on) is dict:
join_keys = on
else:
raise TypeError("Must pass a str, list, or dict of join keys")
with cython_context():
return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys))
def filter_by(self, values, column_name, exclude=False):
"""
Filter an SFrame by values inside an iterable object. Result is an
SFrame that only includes (or excludes) the rows that have a column
with the given ``column_name`` which holds one of the values in the
given ``values`` :class:`~graphlab.SArray`. If ``values`` is not an
SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SFrame. The resulting SFrame will
only include rows that have one of these values in the given
column.
column_name : str
The column of the SFrame to match with the given `values`.
exclude : bool
If True, the result SFrame will contain all rows EXCEPT those that
have one of ``values`` in ``column_name``.
Returns
-------
out : SFrame
The filtered SFrame.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'animal_type': ['dog', 'cat', 'cow', 'horse'],
... 'name': ['bob', 'jim', 'jimbob', 'bobjim']})
>>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']
>>> sf.filter_by(household_pets, 'animal_type')
+-------------+----+------+
| animal_type | id | name |
+-------------+----+------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+------+
[2 rows x 3 columns]
>>> sf.filter_by(household_pets, 'animal_type', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| horse | 4 | bobjim |
| cow | 3 | jimbob |
+-------------+----+--------+
[2 rows x 3 columns]
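A single value may also be given; it is wrapped into a list before
filtering, so the following is equivalent to passing ['cat'] (illustrative):
>>> sf.filter_by('cat', 'animal_type')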
"""
_mt._get_metric_tracker().track('sframe.filter_by')
if type(column_name) is not str:
raise TypeError("Must pass a str as column_name")
if type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not hasattr(values, '__iter__'):
values = [values]
values = SArray(values)
value_sf = SFrame()
value_sf.add_column(values, column_name)
# Make sure the values list has unique values, or else join will not
# filter.
value_sf = value_sf.groupby(column_name, {})
existing_columns = self.column_names()
if column_name not in existing_columns:
raise KeyError("Column '" + column_name + "' not in SFrame.")
existing_type = self.column_types()[self.column_names().index(column_name)]
given_type = value_sf.column_types()[0]
if given_type != existing_type:
raise TypeError("Type of given values does not match type of column '" +
column_name + "' in SFrame.")
with cython_context():
if exclude:
id_name = "id"
# Make sure this name is unique so we know what to remove in
# the result
while id_name in existing_columns:
id_name += "1"
value_sf = value_sf.add_row_number(id_name)
tmp = SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'left',
{column_name:column_name}))
ret_sf = tmp[tmp[id_name] == None]
del ret_sf[id_name]
return ret_sf
else:
return SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'inner',
{column_name:column_name}))
@_check_canvas_enabled
def show(self, columns=None, view=None, x=None, y=None):
"""
show(columns=None, view=None, x=None, y=None)
Visualize the SFrame with GraphLab Create :mod:`~graphlab.canvas`. This function
starts Canvas if it is not already running. If the SFrame has already been plotted,
this function will update the plot.
Parameters
----------
columns : list of str, optional
The columns of this SFrame to show in the SFrame view. In an
interactive browser target of Canvas, the columns will be selectable
and reorderable through the UI as well. If not specified, the
SFrame view will use all columns of the SFrame.
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on which Canvas target is set).
- 'Table': Show a scrollable, tabular view of the data in the
SFrame.
- 'Summary': Show a list of columns with some summary statistics
and plots for each column.
- 'Scatter Plot': Show a scatter plot of two numeric columns.
- 'Heat Map': Show a heat map of two numeric columns.
- 'Bar Chart': Show a bar chart of one numeric and one categorical
column.
- 'Line Chart': Show a line chart of one numeric and one
categorical column.
x : str, optional
The column to use for the X axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the columns
in this SFrame. For Scatter Plot and Heat Map, the column must be
numeric (int or float). If not set, defaults to the first available
valid column.
y : str, optional
The column to use for the Y axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the numeric
columns in this SFrame. If not set, defaults to the second
available numeric column.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view.
See Also
--------
canvas
Examples
--------
Suppose 'sf' is an SFrame, we can view it in GraphLab Canvas using:
>>> sf.show()
To choose a column filter (applied to all SFrame views):
>>> sf.show(columns=["Foo", "Bar"]) # use only columns 'Foo' and 'Bar'
>>> sf.show(columns=sf.column_names()[3:7]) # use the 4th through 7th columns
To choose a specific view of the SFrame:
>>> sf.show(view="Summary")
>>> sf.show(view="Table")
>>> sf.show(view="Bar Chart", x="col1", y="col2")
>>> sf.show(view="Line Chart", x="col1", y="col2")
>>> sf.show(view="Scatter Plot", x="col1", y="col2")
>>> sf.show(view="Heat Map", x="col1", y="col2")
"""
import graphlab.canvas
import graphlab.canvas.inspect
import graphlab.canvas.views.sframe
graphlab.canvas.inspect.find_vars(self)
return graphlab.canvas.show(graphlab.canvas.views.sframe.SFrameView(self, params={
'view': view,
'columns': columns,
'x': x,
'y': y
}))
def pack_columns(self, columns=None, column_prefix=None, dtype=list,
fill_na=None, remove_prefix=True, new_column_name=None):
"""
Pack two or more columns of the current SFrame into one single
column. The result is a new SFrame with the unaffected columns from the
original SFrame plus the newly created column.
The list of columns that are packed is chosen through either the
``columns`` or ``column_prefix`` parameter. Only one of the parameters
is allowed to be provided. ``columns`` explicitly specifies the list of
columns to pack, while ``column_prefix`` specifies that all columns that
have the given prefix are to be packed.
The type of the resulting column is decided by the ``dtype`` parameter.
Allowed values for ``dtype`` are dict, array.array and list:
- *dict*: pack to a dictionary SArray where column name becomes
dictionary key and column value becomes dictionary value
- *array.array*: pack all values from the packing columns into an array
- *list*: pack all values from the packing columns into a list.
Parameters
----------
columns : list[str], optional
A list of column names to be packed. There must be at least
two columns to pack. If omitted and `column_prefix` is not
specified, all columns from current SFrame are packed. This
parameter is mutually exclusive with the `column_prefix` parameter.
column_prefix : str, optional
Pack all columns with the given `column_prefix`.
This parameter is mutually exclusive with the `columns` parameter.
dtype : dict | array.array | list, optional
The resulting packed column type. If not provided, dtype is list.
fill_na : value, optional
Value to fill into packed column if missing value is encountered.
If packing to dictionary, `fill_na` is only applicable to dictionary
values; missing keys are not replaced.
remove_prefix : bool, optional
If True and `column_prefix` is specified, the dictionary key will
be constructed by removing the prefix from the column name.
This option is only applicable when packing to dict type.
new_column_name : str, optional
Packed column name. If not given and `column_prefix` is given,
then the prefix will be used as the new column name, otherwise name
is generated automatically.
Returns
-------
out : SFrame
An SFrame that contains columns that are not packed, plus the newly
packed column.
See Also
--------
unpack
Notes
-----
- There must be at least two columns to pack.
- If packing to dictionary, missing keys are always dropped. Missing
values are dropped if fill_na is not provided; otherwise, each missing
value is replaced by 'fill_na'. If packing to list or array, missing
values are kept unless 'fill_na' is provided, in which case they are
replaced with the 'fill_na' value.
Examples
--------
Suppose 'sf' is an SFrame that maintains business category
information:
>>> sf = graphlab.SFrame({'business': range(1, 5),
... 'category.retail': [1, None, 1, None],
... 'category.food': [1, 1, None, None],
... 'category.service': [None, 1, 1, None],
... 'category.shop': [1, 1, None, 1]})
>>> sf
+----------+-----------------+---------------+------------------+---------------+
| business | category.retail | category.food | category.service | category.shop |
+----------+-----------------+---------------+------------------+---------------+
| 1 | 1 | 1 | None | 1 |
| 2 | None | 1 | 1 | 1 |
| 3 | 1 | None | 1 | None |
| 4 | None | 1 | None | 1 |
+----------+-----------------+---------------+------------------+---------------+
[4 rows x 5 columns]
To pack all category columns into a list:
>>> sf.pack_columns(column_prefix='category')
+----------+--------------------+
| business | X2 |
+----------+--------------------+
| 1 | [1, 1, None, 1] |
| 2 | [None, 1, 1, 1] |
| 3 | [1, None, 1, None] |
| 4 | [None, 1, None, 1] |
+----------+--------------------+
[4 rows x 2 columns]
To pack all category columns into a dictionary, with new column name:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... new_column_name='category')
+----------+--------------------------------+
| business | category |
+----------+--------------------------------+
| 1 | {'food': 1, 'shop': 1, 're ... |
| 2 | {'food': 1, 'shop': 1, 'se ... |
| 3 | {'retail': 1, 'service': 1} |
| 4 | {'food': 1, 'shop': 1} |
+----------+--------------------------------+
[4 rows x 2 columns]
To keep column prefix in the resulting dict key:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
remove_prefix=False)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | {'category.retail': 1, 'ca ... |
| 2 | {'category.food': 1, 'cate ... |
| 3 | {'category.retail': 1, 'ca ... |
| 4 | {'category.food': 1, 'cate ... |
+----------+--------------------------------+
[4 rows x 2 columns]
To explicitly pack a set of columns:
>>> sf.pack_columns(columns = ['business', 'category.retail',
'category.food', 'category.service',
'category.shop'])
+-----------------------+
| X1 |
+-----------------------+
| [1, 1, 1, None, 1] |
| [2, None, 1, 1, 1] |
| [3, 1, None, 1, None] |
| [4, None, 1, None, 1] |
+-----------------------+
[4 rows x 1 columns]
To pack all columns with name starting with 'category' into an array
type, and with missing value replaced with 0:
>>> sf.pack_columns(column_prefix="category", dtype=array.array,
... fill_na=0)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | array('d', [1.0, 1.0, 0.0, ... |
| 2 | array('d', [0.0, 1.0, 1.0, ... |
| 3 | array('d', [1.0, 0.0, 1.0, ... |
| 4 | array('d', [0.0, 1.0, 0.0, ... |
+----------+--------------------------------+
[4 rows x 2 columns]
"""
if columns != None and column_prefix != None:
raise ValueError("'columns' and 'column_prefix' parameter cannot be given at the same time.")
if new_column_name == None and column_prefix != None:
new_column_name = column_prefix
if column_prefix != None:
if type(column_prefix) != str:
raise TypeError("'column_prefix' must be a string")
columns = [name for name in self.column_names() if name.startswith(column_prefix)]
if len(columns) == 0:
raise ValueError("There is no column starts with prefix '" + column_prefix + "'")
elif columns == None:
columns = self.column_names()
else:
if not hasattr(columns, '__iter__'):
raise TypeError("columns must be an iterable type")
column_names = set(self.column_names())
for column in columns:
if (column not in column_names):
raise ValueError("Current SFrame has no column called '" + str(column) + "'.")
# check duplicate names
if len(set(columns)) != len(columns):
raise ValueError("There is duplicate column names in columns parameter")
if (len(columns) <= 1):
raise ValueError("Please provide at least two columns to pack")
if (dtype not in (dict, list, array.array)):
raise ValueError("Resulting dtype has to be one of dict/array.array/list type")
# fill_na value for array needs to be numeric
if dtype == array.array:
if (fill_na != None) and (type(fill_na) not in (int, float)):
raise ValueError("fill_na value for array needs to be numeric type")
# all columns have to be numeric type
for column in columns:
if self[column].dtype() not in (int, float):
raise TypeError("Column '" + column + "' type is not numeric, cannot pack into array type")
# generate dict key names if pack to dictionary
# we try to be smart here
# if all column names are like: a.b, a.c, a.d,...
# we then use "b", "c", "d", etc as the dictionary key during packing
if (dtype == dict) and (column_prefix != None) and (remove_prefix == True):
size_prefix = len(column_prefix)
first_char = set([c[size_prefix:size_prefix+1] for c in columns])
if ((len(first_char) == 1) and first_char.pop() in ['.','-','_']):
dict_keys = [name[size_prefix+1:] for name in columns]
else:
dict_keys = [name[size_prefix:] for name in columns]
else:
dict_keys = columns
rest_columns = [name for name in self.column_names() if name not in columns]
if new_column_name != None:
if type(new_column_name) != str:
raise TypeError("'new_column_name' has to be a string")
if new_column_name in rest_columns:
raise KeyError("Current SFrame already contains a column name " + new_column_name)
else:
new_column_name = ""
_mt._get_metric_tracker().track('sframe.pack_columns')
ret_sa = None
with cython_context():
ret_sa = SArray(_proxy=self.__proxy__.pack_columns(columns, dict_keys, dtype, fill_na))
new_sf = self.select_columns(rest_columns)
new_sf.add_column(ret_sa, new_column_name)
return new_sf
def split_datetime(self, expand_column, column_name_prefix=None, limit=None, tzone=False):
"""
Splits a datetime column of SFrame to multiple columns, with each value in a
separate column. Returns a new SFrame with the expanded column replaced with
a list of new columns. The expanded column must be of datetime type.
For more details regarding name generation and other behavior, refer
to :py:func:`graphlab.SArray.split_datetime()`
Parameters
----------
expand_column : str
Name of the unpacked column.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit : list[str], optional
Limits the set of datetime elements to expand.
Elements are 'year','month','day','hour','minute',
and 'second'.
tzone : bool, optional
A boolean parameter that determines whether to show the timezone
column or not. Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains rest of columns from original SFrame with
the given column replaced with a collection of expanded columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+-------------------------------------------------+
| id | submission |
+----+-------------------------------------------------+
| 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))|
| 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))|
+----+-------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
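The prefix of the generated column names can also be changed. Illustrative
sketch ('sub' is an assumed prefix):
>>> sf.split_datetime('submission', column_name_prefix='sub', limit=['year'])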
"""
if expand_column not in self.column_names():
raise KeyError("column '" + expand_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = expand_column
new_sf = self[expand_column].split_datetime(column_name_prefix, limit, tzone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != expand_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.split_datetime')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def unpack(self, unpack_column, column_name_prefix=None, column_types=None,
na_value=None, limit=None):
"""
Expand one column of this SFrame to multiple columns with each value in
a separate column. Returns a new SFrame with the unpacked column
replaced with a list of new columns. The column must be of
list/array/dict type.
For more details regarding name generation, missing value handling and
other behavior, refer to the SArray version of
:py:func:`~graphlab.SArray.unpack()`.
Parameters
----------
unpack_column : str
Name of the unpacked column
column_name_prefix : str, optional
If provided, unpacked column names would start with the given
prefix. If not provided, default value is the name of the unpacked
column.
column_types : [type], optional
Column types for the unpacked columns.
If not provided, column types are automatically inferred from first
100 rows. For array type, default column types are float. If
provided, column_types also restricts how many columns to unpack.
na_value : flexible_type, optional
If provided, convert all values that are equal to "na_value" to
missing value (None).
limit : list[str] | list[int], optional
Control unpacking only a subset of list/array/dict value. For
dictionary SArray, `limit` is a list of dictionary keys to restrict.
For list/array SArray, `limit` is a list of integers that are
indexes into the list/array value.
Returns
-------
out : SFrame
A new SFrame that contains rest of columns from original SFrame with
the given column replaced with a collection of unpacked columns.
See Also
--------
pack_columns, SArray.unpack
Examples
---------
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]})
+----+------------------+
| id | wc |
+----+------------------+
| 1 | {'a': 1} |
| 2 | {'b': 2} |
| 3 | {'a': 1, 'b': 2} |
+----+------------------+
[3 rows x 2 columns]
>>> sf.unpack('wc')
+----+------+------+
| id | wc.a | wc.b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To not have prefix in the generated column name:
>>> sf.unpack('wc', column_name_prefix="")
+----+------+------+
| id | a | b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To limit subset of keys to unpack:
>>> sf.unpack('wc', limit=['b'])
+----+------+
| id | wc.b |
+----+------+
| 1 | None |
| 2 | 2 |
| 3 | 2 |
+----+------+
[3 rows x 2 columns]
To unpack an array column:
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'friends': [array.array('d', [1.0, 2.0, 3.0]),
... array.array('d', [2.0, 3.0, 4.0]),
... array.array('d', [3.0, 4.0, 5.0])]})
>>> sf
+----+-----------------------------+
| id | friends |
+----+-----------------------------+
| 1 | array('d', [1.0, 2.0, 3.0]) |
| 2 | array('d', [2.0, 3.0, 4.0]) |
| 3 | array('d', [3.0, 4.0, 5.0]) |
+----+-----------------------------+
[3 rows x 2 columns]
>>> sf.unpack('friends')
+----+-----------+-----------+-----------+
| id | friends.0 | friends.1 | friends.2 |
+----+-----------+-----------+-----------+
| 1 | 1.0 | 2.0 | 3.0 |
| 2 | 2.0 | 3.0 | 4.0 |
| 3 | 3.0 | 4.0 | 5.0 |
+----+-----------+-----------+-----------+
[3 rows x 4 columns]
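Since `column_types` also restricts how many columns are unpacked, passing
two types expands only the first two array elements (illustrative):
>>> sf.unpack('friends', column_types=[float, float])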
"""
if unpack_column not in self.column_names():
raise KeyError("column '" + unpack_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = unpack_column
new_sf = self[unpack_column].unpack(column_name_prefix, column_types, na_value, limit)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != unpack_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.unpack')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def stack(self, column_name, new_column_name=None, drop_na=False):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
If the column is array or list type, one new column is created as a
result of stacking. Each new row holds one element of the array or list
value, and the rest of the columns from the same original row are repeated.
The new SFrame includes the newly created column and all columns other
than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
The new column name(s). If original column is list/array type,
new_column_name must be a string. If original column is dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = graphlab.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
Observe that since topic 4 had no words, an empty row is inserted.
To drop that row, set drop_na=True in the call to stack, as shown below.
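For illustration, the same call with drop_na=True (output reconstructed
by hand from the table above, not captured from a live session):
>>> sf.stack('words', new_column_name=['word', 'count'], drop_na=True)
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
+-------+------+-------+
[6 rows x 3 columns]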
Suppose 'sf' is an SFrame that contains a user and his/her friends,
where 'friends' columns is an array type. Stack on 'friends' column
would create a user/friend list for each user/friend pair:
>>> sf = graphlab.SFrame({'user':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
+------+------------------+
| user | friends |
+------+------------------+
| 1 | [2, 3, 4] |
| 2 | [5, 6] |
| 3 | [4, 5, 10, None] |
+------+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
+------+--------+
| user | friend |
+------+--------+
| 1 | 2 |
| 1 | 3 |
| 1 | 4 |
| 2 | 5 |
| 2 | 6 |
| 3 | 4 |
| 3 | 5 |
| 3 | 10 |
| 3 | None |
+------+--------+
[9 rows x 2 columns]
"""
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.")
stack_column_type = self[column_name].dtype()
if (stack_column_type not in [dict, array.array, list]):
raise TypeError("Stack is only supported for column of dict/list/array type.")
if (new_column_name != None):
if stack_column_type == dict:
if (type(new_column_name) is not list):
raise TypeError("new_column_name has to be a list to stack dict type")
elif (len(new_column_name) != 2):
raise TypeError("new_column_name must have length of two")
else:
if (type(new_column_name) != str):
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError("Column with name '" + name + "' already exists, pick a new column name")
else:
if stack_column_type == dict:
new_column_name = ["",""]
else:
new_column_name = [""]
# infer column types
head_row = SArray(self[column_name].head(100)).dropna()
if (len(head_row) == 0):
raise ValueError("Cannot infer column type because there is not enough rows to infer value")
if stack_column_type == dict:
# infer key/value type
keys = []; values = []
for row in head_row:
for val in row:
keys.append(val)
if val != None: values.append(row[val])
new_column_type = [
infer_type_of_list(keys),
infer_type_of_list(values)
]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
_mt._get_metric_tracker().track('sframe.stack')
with cython_context():
return SFrame(_proxy=self.__proxy__.stack(column_name, new_column_name, new_column_type, drop_na))
def unstack(self, column, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column`` is a numeric column, the result will be of
array.array type. If ``column`` is a non-numeric column, the new column
will be of list type. If ``column`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~graphlab.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = graphlab.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = graphlab.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='friends')
+------+-----------------------------+
| user | friends |
+------+-----------------------------+
| 3 | array('d', [5.0]) |
| 1 | array('d', [2.0, 4.0, 3.0]) |
| 2 | array('d', [5.0, 6.0, 4.0]) |
| 4 | array('d', [2.0, 3.0]) |
+------+-----------------------------+
[4 rows x 2 columns]
"""
if (type(column) != str and len(column) != 2):
raise TypeError("'column' parameter has to be either a string or a list of two strings.")
_mt._get_metric_tracker().track('sframe.unstack')
with cython_context():
if type(column) == str:
key_columns = [i for i in self.column_names() if i != column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name : graphlab.aggregate.CONCAT(column)})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column))
elif len(column) == 2:
key_columns = [i for i in self.column_names() if i not in column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name:graphlab.aggregate.CONCAT(column[0], column[1])})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column[0], column[1]))
def unique(self):
"""
Remove duplicate rows of the SFrame. Will not necessarily preserve the
order of the given SFrame in the new SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the unique rows of the current SFrame.
Raises
------
TypeError
If any column in the SFrame is a dictionary type.
See Also
--------
SArray.unique
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]})
>>> sf
+----+-------+
| id | value |
+----+-------+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
| 3 | 3 |
| 4 | 4 |
+----+-------+
[5 rows x 2 columns]
>>> sf.unique()
+----+-------+
| id | value |
+----+-------+
| 2 | 2 |
| 4 | 4 |
| 3 | 3 |
| 1 | 1 |
+----+-------+
[4 rows x 2 columns]
"""
return self.groupby(self.column_names(),{})
def sort(self, sort_columns, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
Only columns of type str, int, float, or datetime can be sorted.
Parameters
----------
sort_columns : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result will be sorted first by
first column, followed by second column, and so on. All columns will
be sorted in the same order as governed by the `ascending`
parameter. To control the sort ordering for each column
individually, `sort_columns` must be a list of (str, bool) pairs.
Given this case, the first value is the column name and the second
value is a boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = graphlab.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
"""
sort_column_names = []
sort_column_orders = []
# validate sort_columns
if (type(sort_columns) == str):
sort_column_names = [sort_columns]
elif (type(sort_columns) == list):
if (len(sort_columns) == 0):
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in sort_columns])
if (len(first_param_types) != 1):
raise ValueError("sort_columns element are not of the same type")
first_param_type = first_param_types.pop()
if (first_param_type == tuple):
sort_column_names = [i[0] for i in sort_columns]
sort_column_orders = [i[1] for i in sort_columns]
elif(first_param_type == str):
sort_column_names = sort_columns
else:
raise TypeError("sort_columns type is not supported")
else:
raise TypeError("sort_columns type is not correct. Supported types are str, list of str or list of (str,bool) pair.")
# use the second parameter if the sort order is not given
if (len(sort_column_orders) == 0):
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if (type(column) != str):
raise TypeError("Only string parameter can be passed in as column names")
if (column not in my_column_names):
raise ValueError("SFrame has no column named: '" + str(column) + "'")
if (self[column].dtype() not in (str, int, float, datetime.datetime)):
raise TypeError("Only columns of type (str, int, float, datetime) can be sorted")
_mt._get_metric_tracker().track('sframe.sort')
with cython_context():
return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders))
def dropna(self, columns=None, how='any'):
"""
Remove missing values from an SFrame. A missing value is either ``None``
or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
columns in the ``columns`` parameter contains at least one missing
value. If ``how`` is 'all', a row will be removed if all of the columns
in the ``columns`` parameter are missing values.
If the ``columns`` parameter is not specified, the default is to
consider all columns when searching for missing values.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : SFrame
SFrame with missing values removed (according to the given rules).
See Also
--------
dropna_split : Drops missing rows from the SFrame and returns them.
Examples
--------
Drop all missing values.
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.dropna()
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
Drop rows where every value is missing.
>>> sf.dropna(how="all")
+------+---+
| a | b |
+------+---+
| 1 | a |
| None | b |
+------+---+
[2 rows x 2 columns]
Drop rows where column 'a' has a missing value.
>>> sf.dropna('a')
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna')
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return SFrame(_proxy=self.__proxy__)
(columns, all_behavior) = self.__dropna_errchk(columns, how)
with cython_context():
return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False))
def dropna_split(self, columns=None, how='any'):
"""
Split rows with missing values from this SFrame. This function has the
same functionality as :py:func:`~graphlab.SFrame.dropna`, but returns a
tuple of two SFrames. The first item is the expected output from
:py:func:`~graphlab.SFrame.dropna`, and the second item contains all the
rows filtered out by the `dropna` algorithm.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : (SFrame, SFrame)
(SFrame with missing values removed,
SFrame with the removed missing values)
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> good, bad = sf.dropna_split()
>>> good
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
>>> bad
+------+------+
| a | b |
+------+------+
| None | b |
| None | None |
+------+------+
[2 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna_split')
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return (SFrame(_proxy=self.__proxy__), SFrame())
(columns, all_behavior) = self.__dropna_errchk(columns, how)
sframe_tuple = self.__proxy__.drop_missing_values(columns, all_behavior, True)
if len(sframe_tuple) != 2:
raise RuntimeError("Did not return two SFrames!")
with cython_context():
return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1]))
def __dropna_errchk(self, columns, how):
if columns is None:
# Default behavior is to consider every column, specified to
# the server by an empty list (to avoid sending all the column
# in this case, since it is the most common)
columns = list()
elif type(columns) is str:
columns = [columns]
elif type(columns) is not list:
raise TypeError("Must give columns as a list, str, or 'None'")
else:
# Verify that we are only passing strings in our list
list_types = set([type(i) for i in columns])
if (str not in list_types) or (len(list_types) > 1):
raise TypeError("All columns must be of 'str' type")
if how not in ['any','all']:
raise ValueError("Must specify 'any' or 'all'")
if how == 'all':
all_behavior = True
else:
all_behavior = False
return (columns, all_behavior)
def fillna(self, column, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
"""
# Normal error checking
if type(column) is not str:
raise TypeError("Must give column name as a str")
ret = self[self.column_names()]
ret[column] = ret[column].fillna(value)
return ret
def add_row_number(self, column_name='id', start=0):
"""
Returns a new SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
Returns
-------
out : SFrame
The new SFrame with the added row-number column.
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.add_row_number')
if type(column_name) is not str:
raise TypeError("Must give column_name as strs")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame")
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name)
new_sf.add_columns(self)
return new_sf
@property
def shape(self):
"""
The shape of the SFrame, in a tuple. The first entry is the number of
rows, the second is the number of columns.
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.shape
(3, 2)
"""
return (self.num_rows(), self.num_cols())
@property
def __proxy__(self):
return self._proxy
@__proxy__.setter
def __proxy__(self, value):
assert type(value) is UnitySFrameProxy
self._proxy = value
| agpl-3.0 |
pkruskal/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but they can be very
expensive to run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
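# ---------------------------------------------------------------------------
# Editorial usage sketch, not part of the original module: a minimal,
# self-contained example making the "Label clamping" and "Kernel" notes from
# the module docstring concrete. The dataset, random seed and fraction of
# hidden labels below are illustrative assumptions only.
if __name__ == "__main__":
    from sklearn import datasets

    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    labels = np.copy(iris.target)
    # Hide roughly 30% of the labels to simulate a semi-supervised setting.
    labels[rng.rand(len(labels)) < 0.3] = -1

    # kernel='knn' builds the sparse O(k*N) neighborhood graph; alpha controls
    # the label clamping described in the module docstring.
    model = LabelSpreading(kernel='knn', n_neighbors=7, alpha=0.2)
    model.fit(iris.data, labels)

    # transduction_ holds the label inferred for every sample, including the
    # ones whose true label was hidden above.
    agreement = (model.transduction_ == iris.target).mean()
    print("agreement with ground truth: %.3f" % agreement)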
| bsd-3-clause |
akrherz/iem | scripts/smos/plot.py | 1 | 3769 | """Create a plot of SMOS data for either 0 or 12z"""
import sys
import datetime
import numpy as np
from pandas.io.sql import read_sql
from pyiem.plot import get_cmap
from pyiem.plot.geoplot import MapPlot
from pyiem.util import get_dbconn, logger, utc
LOG = logger()
def makeplot(ts, routes="ac"):
"""
Generate soil moisture and optical depth maps for the given GMT time.
"""
pgconn = get_dbconn("smos", user="nobody")
df = read_sql(
"""
WITH obs as (
SELECT grid_idx, avg(soil_moisture) * 100. as sm,
avg(optical_depth) as od from data where valid BETWEEN %s and %s
GROUP by grid_idx)
SELECT ST_x(geom) as lon, ST_y(geom) as lat,
CASE WHEN sm is Null THEN -1 ELSE sm END as sm,
CASE WHEN od is Null THEN -1 ELSE od END as od
from obs o JOIN grid g ON (o.grid_idx = g.idx)
""",
pgconn,
params=(
ts - datetime.timedelta(hours=6),
ts + datetime.timedelta(hours=6),
),
index_col=None,
)
if df.empty:
LOG.info(
"Did not find SMOS data for: %s-%s",
ts - datetime.timedelta(hours=6),
ts + datetime.timedelta(hours=6),
)
return
for sector in ["midwest", "iowa"]:
clevs = np.arange(0, 71, 5)
mp = MapPlot(
sector=sector,
axisbg="white",
title="SMOS Satellite: Soil Moisture (0-5cm)",
subtitle="Satelite passes around %s UTC"
% (ts.strftime("%d %B %Y %H"),),
)
if sector == "iowa":
mp.drawcounties()
cmap = get_cmap("jet_r")
cmap.set_under("#EEEEEE")
cmap.set_over("k")
mp.hexbin(
df["lon"].values,
df["lat"].values,
df["sm"],
clevs,
units="%",
cmap=cmap,
)
pqstr = "plot %s %s00 smos_%s_sm%s.png smos_%s_sm%s.png png" % (
routes,
ts.strftime("%Y%m%d%H"),
sector,
ts.strftime("%H"),
sector,
ts.strftime("%H"),
)
mp.postprocess(pqstr=pqstr)
mp.close()
for sector in ["midwest", "iowa"]:
clevs = np.arange(0, 1.001, 0.05)
mp = MapPlot(
sector=sector,
axisbg="white",
title=(
"SMOS Satellite: Land Cover Optical Depth "
"(microwave L-band)"
),
subtitle="Satelite passes around %s UTC"
% (ts.strftime("%d %B %Y %H"),),
)
if sector == "iowa":
mp.drawcounties()
cmap = get_cmap("jet")
cmap.set_under("#EEEEEE")
cmap.set_over("k")
mp.hexbin(
df["lon"].values, df["lat"].values, df["od"], clevs, cmap=cmap
)
pqstr = "plot %s %s00 smos_%s_od%s.png smos_%s_od%s.png png" % (
routes,
ts.strftime("%Y%m%d%H"),
sector,
ts.strftime("%H"),
sector,
ts.strftime("%H"),
)
mp.postprocess(pqstr=pqstr)
mp.close()
def main(argv):
"""Go Main Go"""
if len(argv) == 2:
hr = int(argv[1])
if hr == 12: # Run for the previous UTC day
ts = utc() - datetime.timedelta(days=1)
ts = ts.replace(hour=12, minute=0, second=0, microsecond=0)
else:
ts = utc().replace(hour=0, minute=0, second=0, microsecond=0)
makeplot(ts)
# Also rerun for one day ago and roughly a week ago
for d in [1, 5]:
ts -= datetime.timedelta(days=d)
makeplot(ts, "a")
else:
ts = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
makeplot(ts, "a")
if __name__ == "__main__":
main(sys.argv)
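# Usage sketch (editorial note, inferred from main() above; the file name
# "plot.py" is illustrative):
#   python plot.py 0              # 00 UTC of the current day, plus reruns
#                                 # for one day ago and roughly a week ago
#   python plot.py 12             # 12 UTC of the previous day, plus reruns
#   python plot.py 2021 3 1 12    # explicit timestamp given as YYYY MM DD HH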
| mit |
bsipocz/scikit-image | doc/examples/plot_windowed_histogram.py | 26 | 5127 | from __future__ import division
"""
========================
Sliding window histogram
========================
Histogram matching can be used for object detection in images [1]_. This
example extracts a single coin from the `skimage.data.coins` image and uses
histogram matching to attempt to locate it within the original image.
First, a box-shaped region of the image containing the target coin is
extracted and a histogram of its greyscale values is computed.
Next, for each pixel in the test image, a histogram of the greyscale values in
a region of the image surrounding the pixel is computed.
`skimage.filters.rank.windowed_histogram` is used for this task, as it employs
an efficient sliding window based algorithm that is able to compute these
histograms quickly [2]_. The local histogram for the region surrounding each
pixel in the image is compared to that of the single coin, with a similarity
measure being computed and displayed.
The histogram of the single coin is computed using `numpy.histogram` on a box
shaped region surrounding the coin, while the sliding window histograms are
computed using a disc shaped structural element of a slightly different size.
This is done in aid of demonstrating that the technique still finds similarity
in spite of these differences.
To demonstrate the rotational invariance of the technique, the same test is
performed on a version of the coins image rotated by 45 degrees.
References
----------
.. [1] Porikli, F. "Integral Histogram: A Fast Way to Extract Histograms
in Cartesian Spaces" CVPR, 2005. Vol. 1. IEEE, 2005
.. [2] S.Perreault and P.Hebert. Median filtering in constant time.
Trans. Image Processing, 16(9):2389-2394, 2007.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, transform
from skimage.util import img_as_ubyte
from skimage.morphology import disk
from skimage.filters import rank
matplotlib.rcParams['font.size'] = 9
def windowed_histogram_similarity(image, selem, reference_hist, n_bins):
# Compute normalized windowed histogram feature vector for each pixel
px_histograms = rank.windowed_histogram(image, selem, n_bins=n_bins)
# Reshape coin histogram to (1,1,N) for broadcast when we want to use it in
# arithmetic operations with the windowed histograms from the image
reference_hist = reference_hist.reshape((1, 1) + reference_hist.shape)
# Compute Chi squared distance metric: sum((X-Y)^2 / (X+Y));
# a measure of distance between histograms
X = px_histograms
Y = reference_hist
num = (X - Y) ** 2
denom = X + Y
denom[denom == 0] = np.infty
frac = num / denom
chi_sqr = 0.5 * np.sum(frac, axis=2)
# Generate a similarity measure. It needs to be low when distance is high
# and high when distance is low; taking the reciprocal will do this.
# Chi squared will always be >= 0, add small value to prevent divide by 0.
similarity = 1 / (chi_sqr + 1.0e-4)
return similarity
# Load the `skimage.data.coins` image
img = img_as_ubyte(data.coins())
# Quantize to 16 levels of greyscale; this way the output image will have a
# 16-dimensional feature vector per pixel
quantized_img = img // 16
# Select the coin from the 4th column, second row.
# Co-ordinate ordering: [x1,y1,x2,y2]
coin_coords = [184, 100, 228, 148] # 44 x 44 region
coin = quantized_img[coin_coords[1]:coin_coords[3],
coin_coords[0]:coin_coords[2]]
# Compute coin histogram and normalize
coin_hist, _ = np.histogram(coin.flatten(), bins=16, range=(0, 16))
coin_hist = coin_hist.astype(float) / np.sum(coin_hist)
# Compute a disk shaped mask that will define the shape of our sliding window
# Example coin is ~44px across, so make a disk 61px wide (2 * rad + 1) to be
# big enough for other coins too.
selem = disk(30)
# Compute the similarity across the complete image
similarity = windowed_histogram_similarity(quantized_img, selem, coin_hist,
coin_hist.shape[0])
# Now try a rotated image
rotated_img = img_as_ubyte(transform.rotate(img, 45.0, resize=True))
# Quantize to 16 levels as before
quantized_rotated_image = rotated_img // 16
# Similarity on rotated image
rotated_similarity = windowed_histogram_similarity(quantized_rotated_image,
selem, coin_hist,
coin_hist.shape[0])
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
axes[0, 0].imshow(quantized_img, cmap='gray')
axes[0, 0].set_title('Quantized image')
axes[0, 0].axis('off')
axes[0, 1].imshow(coin, cmap='gray')
axes[0, 1].set_title('Coin from 2nd row, 4th column')
axes[0, 1].axis('off')
axes[1, 0].imshow(img, cmap='gray')
axes[1, 0].imshow(similarity, cmap='hot', alpha=0.5)
axes[1, 0].set_title('Original image with overlaid similarity')
axes[1, 0].axis('off')
axes[1, 1].imshow(rotated_img, cmap='gray')
axes[1, 1].imshow(rotated_similarity, cmap='hot', alpha=0.5)
axes[1, 1].set_title('Rotated image with overlaid similarity')
axes[1, 1].axis('off')
plt.show()
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
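# Editorial note: with the defaults above, build_dataset() returns a training
# pair X of shape (50, 200) and y of shape (50,), plus a held-out pair of the
# same shapes, where only the first 10 coefficients of the generating weight
# vector are non-zero.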
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# to check this, verify that the two selected alphas are at most one
# index apart in the clf.alphas_ grid
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in
    # incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
adocherty/polymode | setup.py | 5 | 1794 | #!/usr/bin/env python
from os.path import join
#Use setuptools for egg installs, if possible
import setuptools
from numpy.distutils.core import setup, Command
from Polymode import __version__
package_name = 'Polymode'
package_version = __version__
package_description ="A package for the modal analysis of microstructured optical fibers"
class generate_api_docs(Command):
"""Generate the api documentation using epydoc
"""
description = "generate the api documentation"
user_options = []
target_dir = "../documentation/api"
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
import os
os.system("epydoc --no-frames -o %s Polymode" % self.target_dir)
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=False,
)
#The module
config.add_subpackage(package_name)
#Other packages used
config.add_subpackage('Nurbs', subpackage_path='other/Nurbs')
return config
def setup_package():
setup(
name = package_name,
version = package_version,
description = package_description,
maintainer = "Andrew Docherty",
url='http://polymode.googlecode.com',
license='GPL3',
configuration = configuration,
# install_requires = ['numpy >= 1.0.1', 'scipy>=0.5.2', 'matplotlib>=0.92',],
zip_safe = True,
cmdclass = {'doc' : generate_api_docs}
)
return
if __name__ == '__main__':
setup_package()
| gpl-3.0 |
freeman-lab/altair | altair/tests/test_api.py | 1 | 6787 | import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from altair import api
from .. import api, spec
VALID_MARKTYPES = spec.SPEC['properties']['marktype']['enum']
def test_empty_data():
d = api.Data()
assert d.formatType=='json'
assert 'formatType' in d
assert 'url' not in d
assert 'values' not in d
def test_dict_data():
data = dict(x=[1, 2, 3],
y=[4, 5, 6])
spec = api.Viz(data)
assert np.all(spec.data == pd.DataFrame(data))
def test_dataframe_data():
datadict = dict(x=[1, 2, 3],
y=[4, 5, 6])
data = pd.DataFrame(datadict)
spec = api.Viz(data)
assert np.all(spec.data == data)
def test_to_dict():
data = pd.DataFrame({'x': [1, 2, 3],
'y': [4, 5, 6]})
spec = api.Viz(data).encode(x='x', y='y')
D = spec.to_dict()
assert D == {'data': {'formatType': 'json',
'values': [{'x': 1, 'y': 4},
{'x': 2, 'y': 5},
{'x': 3, 'y': 6}]},
'encoding': {'x': {'bin': False, 'name': 'x', 'type': 'Q'},
'y': {'bin': False, 'name': 'y', 'type': 'Q'}},
'marktype': 'point'}
def test_markers():
data = dict(x=[1, 2, 3],
y=[4, 5, 6])
spec = api.Viz(data)
# call, e.g. spec.mark('point')
for marktype in VALID_MARKTYPES:
spec.mark(marktype)
assert spec.marktype == marktype
# call, e.g. spec.point()
for marktype in VALID_MARKTYPES:
method = marktype
getattr(spec, method)()
assert spec.marktype == marktype
def test_encode():
data = dict(col1=[1.0, 2.0, 3.0],
col2=[0.1, 0.2, 0.3],
col3=['A', 'B', 'C'],
col4=[True, False, True],
col5=[0.1, 0.2, 0.3],
col6=pd.date_range('2012', periods=3, freq='A'),
col7=np.arange(3))
kwargs = dict(x='col1', y='col2', row='col3', col='col4',
size='col5', color='col6', shape='col7')
spec = api.Viz(data).encode(**kwargs)
for key, name in kwargs.items():
assert getattr(spec.encoding, key).name == name
def test_encode_aggregates():
data = dict(col1=[1.0, 2.0, 3.0],
col2=[0.1, 0.2, 0.3],
col3=['A', 'B', 'C'],
col4=[True, False, True],
col5=[0.1, 0.2, 0.3],
col6=pd.date_range('2012', periods=3, freq='A'),
col7=np.arange(3))
kwargs = dict(x=('count', 'col1'), y=('count', 'col2'),
row=('count', 'col3'), col=('count', 'col4'),
size=('avg', 'col5'), color=('max', 'col6'),
shape=('count', 'col7'))
spec = api.Viz(data).encode(**{key:"{0}({1})".format(*val)
for key, val in kwargs.items()})
for key, val in kwargs.items():
agg, name = val
assert getattr(spec.encoding, key).name == name
assert getattr(spec.encoding, key).aggregate == agg
def test_encode_types():
data = dict(col1=[1.0, 2.0, 3.0],
col2=[0.1, 0.2, 0.3],
col3=['A', 'B', 'C'],
col4=[True, False, True],
col5=[0.1, 0.2, 0.3],
col6=pd.date_range('2012', periods=3, freq='A'),
col7=np.arange(3))
kwargs = dict(x=('col1', 'Q'), y=('col2', 'Q'),
row=('col3', 'O'), col=('col4', 'N'),
size=('col5', 'Q'), color=('col6', 'T'),
shape=('col7', 'O'))
spec = api.Viz(data).encode(**{key:"{0}:{1}".format(*val)
for key, val in kwargs.items()})
for key, val in kwargs.items():
name, typ = val
assert getattr(spec.encoding, key).name == name
assert getattr(spec.encoding, key).type == typ
def test_infer_types():
data = dict(col1=[1.0, 2.0, 3.0],
col2=[0.1, 0.2, 0.3],
col3=['A', 'B', 'C'],
col4=[True, False, True],
col5=[0.1, 0.2, 0.3],
col6=pd.date_range('2012', periods=3, freq='A'),
col7=np.arange(3))
kwargs = dict(x=('col1', 'Q'), y=('col2', 'Q'),
row=('col3', 'N'), col=('col4', 'N'),
size=('col5', 'Q'), color=('col6', 'T'),
shape=('col7', 'Q'))
spec = api.Viz(data).encode(**{key: val[0]
for key, val in kwargs.items()})
for key, val in kwargs.items():
name, typ = val
assert getattr(spec.encoding, key).name == name
assert getattr(spec.encoding, key).type == typ
def test_hist():
data = dict(x=[1, 2, 3],
y=[4, 5, 6])
viz1 = api.Viz(data).hist(x='x')
assert viz1.encoding.x.name == "x"
assert viz1.encoding.x.bin.maxbins == 10
assert viz1.encoding.y.name == "x"
assert viz1.encoding.y.type == "Q"
assert viz1.encoding.y.aggregate == "count"
viz2 = api.Viz(data).hist(x="x", bins=30)
assert viz2.encoding.x.bin.maxbins == 30
expected = {'data': {'formatType': 'json',
'values': [{'x': 1, 'y': 4}, {'x': 2, 'y': 5},
{'x': 3, 'y': 6}]},
'encoding': {'x': {'bin': {'maxbins': 30}, 'name': 'x'},
'y': {'aggregate': 'count',
'bin': False,
'name': 'x',
'type': 'Q'}},
'marktype': 'bar'}
viz3 = api.Viz(data).hist(x="x:O",
color=api.Color(shorthand="bar", type="N")
)
assert viz3.encoding.x.name == "x"
assert viz3.encoding.x.type == "O"
expected = {'data': {'formatType': 'json',
'values': [{'x': 1, 'y': 4}, {'x': 2, 'y': 5},
{'x': 3, 'y': 6}]},
'encoding': {'x': {'bin': {'maxbins': 10},
'name': 'x', 'type': 'O'},
'y': {'aggregate': 'count',
'bin': False,
'name': 'x',
'type': 'Q'},
'color': {'bin': False,
'name': 'bar',
'opacity': 1.0,
'type': 'N',
'value': '#4682b4'}},
'marktype': 'bar'}
assert viz3.to_dict() == expected
viz4 = api.Viz(data).hist(
x=api.X(shorthand="x", bin=api.Bin(maxbins=40)))
assert viz4.encoding.x.name == "x"
assert viz4.encoding.x.bin.maxbins == 40
| bsd-3-clause |
daniorerio/trackpy | trackpy/utils.py | 1 | 6527 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import collections
import functools
import re
import sys
import warnings
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from scipy import stats
import yaml
def fit_powerlaw(data, plot=True, **kwargs):
"""Fit a powerlaw by doing a linear regression in log space."""
ys = pd.DataFrame(data)
x = pd.Series(data.index.values, index=data.index, dtype=np.float64)
values = pd.DataFrame(index=['n', 'A'])
fits = {}
for col in ys:
y = ys[col].dropna()
slope, intercept, r, p, stderr = \
stats.linregress(np.log(x), np.log(y))
values[col] = [slope, np.exp(intercept)]
fits[col] = x.apply(lambda x: np.exp(intercept)*x**slope)
values = values.T
fits = pd.concat(fits, axis=1)
if plot:
from trackpy import plots
plots.fit(data, fits, logx=True, logy=True, legend=False, **kwargs)
return values
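# --- Illustrative usage sketch (not part of the original module) -----------
# A minimal, hypothetical example of fit_powerlaw: build a fake MSD-like
# Series that follows A * t**n and recover the exponent and prefactor.
# The names `lagtimes` and `msd` are made up for illustration.
def _fit_powerlaw_example():
    lagtimes = np.arange(1, 50, dtype=np.float64)
    msd = pd.Series(2.0 * lagtimes ** 0.9, index=lagtimes)
    # plot=False keeps the example free of any matplotlib backend requirement
    values = fit_powerlaw(msd, plot=False)
    # `values` is a DataFrame with columns 'n' (exponent) and 'A' (prefactor)
    return values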
class memo(object):
"""Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize """
def __init__(self, func):
self.func = func
self.cache = {}
functools.update_wrapper(self, func)
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
warnings.warn("A memoization cache is being used on an uncacheable " +
"object. Proceeding by bypassing the cache.",
UserWarning)
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
# This code trips up numba. It's nice for development
# but it shouldn't matter for users.
# def __repr__(self):
# '''Return the function's docstring.'''
# return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
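# --- Illustrative usage sketch (not part of the original module) -----------
# `memo` caches results keyed on the positional arguments, so repeated calls
# with the same arguments reuse the stored value. `_slow_square` is a
# made-up example function.
@memo
def _slow_square(x):
    return x * x
def _memo_example():
    first = _slow_square(4)    # computed and stored in the cache
    second = _slow_square(4)   # served straight from the cache
    return first == second     # True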
def extract(pattern, string, group, convert=None):
"""Extract a pattern from a string. Optionally, convert it
to a desired type (float, timestamp, etc.) by specifying a function.
When the pattern is not found, gracefully return None."""
# group may be 1, (1,) or (1, 2).
if type(group) is int:
grp = (group,)
elif type(group) is tuple:
grp = group
assert type(grp) is tuple, "The arg 'group' should be an int or a tuple."
try:
result = re.search(pattern, string, re.DOTALL).group(*grp)
except AttributeError:
# For easy unpacking, when a tuple is expected, return a tuple of Nones.
return None if type(group) is int else (None,)*len(group)
return convert(result) if convert else result
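# --- Illustrative usage sketch (not part of the original module) -----------
# `extract` returns the requested regex group (optionally converted) or None
# when the pattern is absent. The pattern and strings below are made up.
def _extract_example():
    fps = extract(r'(\d+) fps', 'recorded at 30 fps', group=1, convert=int)
    missing = extract(r'(\d+) fps', 'no rate recorded', group=1, convert=int)
    return fps, missing   # (30, None)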
def timestamp(ts_string):
"Convert a timestamp string to a datetime type."
if ts_string is None:
return None
return datetime.strptime(ts_string, '%Y-%m-%d %H:%M:%S')
def time_interval(raw):
"Convert a time interval string into a timedelta type."
if raw is None:
return None
m = re.match('([0-9][0-9]):([0-5][0-9]):([0-5][0-9])', raw)
h, m, s = map(int, m.group(1, 2, 3))
return timedelta(hours=h, minutes=m, seconds=s)
def suppress_plotting():
import matplotlib.pyplot as plt
plt.switch_backend('Agg') # does not plot to screen
# HH:MM:SS, H:MM:SS, MM:SS, M:SS all OK
lazy_timestamp_pat = r'\d?\d?:?\d?\d:\d\d'
# a time stamp followed by any text comment
ltp = lazy_timestamp_pat
video_log_pattern = r'(' + ltp + r')-?(' + ltp + r')? ?(RF)?(.+)?'
def lazy_timestamp(partial_timestamp):
"""Regularize a lazy timestamp like '0:37' -> '00:00:37'.
HH:MM:SS, H:MM:SS, MM:SS, and M:SS all OK.
Parameters
----------
partial_timestamp : string or other object
Returns
-------
regularized string
"""
if not isinstance(partial_timestamp, str):
# might be NaN or other unprocessable entry
return partial_timestamp
    input_format = r'\d?\d?:?\d?\d:\d\d'
if not re.match(input_format, partial_timestamp):
raise ValueError("Input string cannot be regularized.")
partial_digits = list(partial_timestamp)
digits = ['0', '0', ':', '0', '0', ':', '0', '0']
digits[-len(partial_digits):] = partial_digits
return ''.join(digits)
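# --- Illustrative usage sketch (not part of the original module) -----------
# `lazy_timestamp` pads partial timestamps out to HH:MM:SS.
def _lazy_timestamp_example():
    return lazy_timestamp('0:37'), lazy_timestamp('1:02:03')
    # -> ('00:00:37', '01:02:03')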
def timedelta_to_frame(timedeltas, fps):
"""Convert timedelta times into frame numbers.
Parameters
----------
timedelta : DataFrame or Series of timedelta64 datatype
fps : frames per second (integer)
Result
------
DataFrame
Note
----
This sounds like a stupidly easy operation, but handling missing data
and multiplication is tricky with timedeltas.
"""
ns = timedeltas.values
seconds = ns * 1e-9
frame_numbers = seconds*fps
result = pd.DataFrame(frame_numbers, dtype=np.int64,
index=timedeltas.index, columns=timedeltas.columns)
result = result.where(timedeltas.notnull(), np.nan)
return result
def random_walk(N):
return np.cumsum(np.random.randn(N), 1)
def record_meta(meta_data, filename):
with open(filename, 'w') as output:
output.write(yaml.dump(meta_data, default_flow_style=False))
def validate_tuple(value, ndim):
if not hasattr(value, '__iter__'):
return (value,) * ndim
if len(value) == ndim:
return tuple(value)
raise ValueError("List length should have same length as image dimensions.")
try:
from IPython.core.display import clear_output
except ImportError:
pass
def print_update(message):
"Print a message immediately; do not wait for current execution to finish."
try:
clear_output()
except Exception:
pass
print(message)
sys.stdout.flush()
def make_pandas_strict():
"""Configure Pandas to raise an exception for "chained assignments."
This is useful during tests.
See http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
Does nothing for Pandas versions before 0.13.0.
"""
major, minor, micro = pd.__version__.split('.')
if major == '0' and int(minor) >= 13:
pd.set_option('mode.chained_assignment', 'raise')
| bsd-3-clause |
wanggang3333/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
bflaven/BlogArticlesExamples | extending_streamlit_usage/005_other_nlp_attempts/example_text_classification_5.py | 1 | 16145 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/extending_streamlit_usage/005_other_nlp_attempts/
[file]
python example_text_classification_5.py
"""
## SOURCE :: https://towardsdatascience.com/text-analysis-feature-engineering-with-nlp-502d6ea9225d
## SOURCE :: https://www.experfy.com/blog/ai-ml/text-classification-with-nlp-tf-idf-vs-word2vec-vs-bert/
## for data
import pandas as pd
import collections
import json
## for plotting
import matplotlib.pyplot as plt
import seaborn as sns
import wordcloud
## for text processing
import re
import nltk
## for language detection
import langdetect
## for sentiment
from textblob import TextBlob
## for ner
import spacy
## for vectorizer
from sklearn import feature_extraction, manifold
## for word embedding
import gensim.downloader as gensim_api
## for topic modeling
import gensim
lst_dics = []
# Change to bigger json News_Category_Dataset_v2.json if needed
with open('datas/News_Category_Dataset_v2_light.json', mode='r', errors='ignore') as json_file:
for dic in json_file:
lst_dics.append(json.loads(dic))
## print the first one
print("\n--- OUTPUT_1")
print(lst_dics[0])
## create dtf
dtf = pd.DataFrame(lst_dics)
## filter categories
dtf = dtf[dtf["category"].isin(['ENTERTAINMENT', 'POLITICS', 'TECH'])][[
"category", "headline"]]
## rename columns
dtf = dtf.rename(columns={"category": "y", "headline": "text"})
## print 5 random rows
print("\n--- OUTPUT_2")
print (dtf.sample(5))
# SHOW IMAGE
# define axis
# x = "y"
# fig, ax = plt.subplots()
# fig.suptitle(x, fontsize=12)
# dtf[x].reset_index().groupby(x).count().sort_values(by= "index").plot(kind="barh", legend=False, ax=ax).grid(axis='x')
# plt.show()
# Language Detection
txt = dtf["text"].iloc[0]
print("\n--- OUTPUT_3")
print(txt, " --> ", langdetect.detect(txt))
# Add a column with the language information for the whole dataset
dtf['lang'] = dtf["text"].apply(
lambda x: langdetect.detect(x)if x.strip() != "" else "")
# print head dataset
# CAUTION VERY LONG PROCESS, BE PATIENT
# show the dataset with the language column
# dtf.head()
# show repartition by lang in a graphic
# CAUTION VERY LONG PROCESS, BE PATIENT
# x = "lang"
# fig, ax = plt.subplots()
# fig.suptitle(x, fontsize=12)
# dtf[x].reset_index().groupby(x).count().sort_values(by="index").plot(kind="barh", legend=False, ax=ax).grid(axis='x')
# plt.show()
print("\n--- OUTPUT_4")
# Text Preprocessing
# CAUTION VERY LONG PROCESS, BE PATIENT
print("--- original ---")
print(txt)
print("--- cleaning ---")
txt = re.sub(r'[^\w\s]', '', str(txt).lower().strip())
print(txt)
print("--- tokenization ---")
txt = txt.split()
print(txt)
# Load generic stop words for the English vocabulary with NLTK
lst_stopwords = nltk.corpus.stopwords.words("english")
print("\n--- OUTPUT_5")
print(lst_stopwords)
# remove those stop words from the first news headline
print("\n--- OUTPUT_6")
print("--- remove stopwords ---")
txt = [word for word in txt if word not in lst_stopwords]
print(txt)
print("\n--- OUTPUT_7")
# Stemming and Lemmatization
print("--- stemming ---")
ps = nltk.stem.porter.PorterStemmer()
print([ps.stem(word) for word in txt])
print("--- lemmatisation ---")
lem = nltk.stem.wordnet.WordNetLemmatizer()
print([lem.lemmatize(word) for word in txt])
def utils_preprocess_text(text, flg_stemm=False, flg_lemm=True, lst_stopwords=None):
    '''
    Preprocess a string.
    :parameter
        :param text: string - name of column containing text
        :param lst_stopwords: list - list of stopwords to remove
        :param flg_stemm: bool - whether stemming is to be applied
        :param flg_lemm: bool - whether lemmatisation is to be applied
    :return
        cleaned text
    '''
## clean (convert to lowercase and remove punctuations and characters and then strip)
text = re.sub(r'[^\w\s]', '', str(text).lower().strip())
## Tokenize (convert from string to list)
lst_text = text.split()
## remove Stopwords
if lst_stopwords is not None:
lst_text = [word for word in lst_text if word not in
lst_stopwords]
## Stemming (remove -ing, -ly, ...)
if flg_stemm == True:
ps = nltk.stem.porter.PorterStemmer()
lst_text = [ps.stem(word) for word in lst_text]
## Lemmatisation (convert the word into root word)
if flg_lemm == True:
lem = nltk.stem.wordnet.WordNetLemmatizer()
lst_text = [lem.lemmatize(word) for word in lst_text]
## back to string from list
text = " ".join(lst_text)
return text
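# --- Quick illustrative check (not from the original article) --------------
# This assumes the same NLTK resources the rest of the script already uses
# (stopwords, wordnet). The sample headline is made up; the exact output
# depends on the lemmatizer, but it should come back lowercased with
# punctuation and stopwords removed.
sample_headline = "Apple Unveils Its New iPhones At The Annual Keynote!"
print(utils_preprocess_text(sample_headline, flg_stemm=False, flg_lemm=True,
                            lst_stopwords=lst_stopwords))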
# be sure to define dtf, don't mess with the arguments
dtf["text_clean"] = dtf["text"].apply(lambda x:utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=lst_stopwords))
print("\n--- OUTPUT_8")
# output
print(dtf.head())
print("\n--- OUTPUT_9")
# Print the first line original and then the same but cleaned
print(dtf["text"].iloc[0], " --> ", dtf["text_clean"].iloc[0])
# Length Analysis
"""
- word count: counts the number of tokens in the text (separated by a space)
- character count: sum the number of characters of each token
- sentence count: count the number of sentences (separated by a period)
- average word length: sum of words length divided by the number of words (character count/word count)
- average sentence length: sum of sentences length divided by the number of sentences (word count/sentence count)
"""
# word_count
dtf['word_count'] = dtf["text"].apply(lambda x: len(str(x).split(" ")))
# char_count
dtf['char_count'] = dtf["text"].apply(
lambda x: sum(len(word) for word in str(x).split(" ")))
# sentence_count
dtf['sentence_count'] = dtf["text"].apply(lambda x: len(str(x).split(".")))
# avg_word_length
dtf['avg_word_length'] = dtf['char_count'] / dtf['word_count']
# avg_sentence_length
dtf['avg_sentence_length'] = dtf['word_count'] / dtf['sentence_count']
print("\n--- OUTPUT_10")
# output
dtf.head()
"""
What’s the distribution of those new variables with respect to the target? To answer that I’ll look at the bivariate distributions (how two variables move together). First, I shall split the whole set of observations into 3 samples (Politics, Entertainment, Tech), then compare the histograms and densities of the samples. If the distributions are different then the variable is predictive because the 3 groups have different patterns.
For instance, let’s see if the character count is correlated with the target variable.
"""
# x, y = "char_count", "y"
# fig, ax = plt.subplots(nrows=1, ncols=2)
# fig.suptitle(x, fontsize=12)
# for i in dtf[y].unique():
# sns.distplot(dtf[dtf[y] == i][x], hist=True, kde=False,
# bins=10, hist_kws={"alpha": 0.8},
# axlabel="histogram", ax=ax[0])
# sns.distplot(dtf[dtf[y] == i][x], hist=False, kde=True,
# kde_kws={"shade": True}, axlabel="density",
# ax=ax[1])
# ax[0].grid(True)
# ax[0].legend(dtf[y].unique())
# ax[1].grid(True)
# plt.show()
# Sentiment Analysis
dtf["sentiment"] = dtf["text"].apply(lambda x:TextBlob(x).sentiment.polarity)
print("\n--- OUTPUT_11")
# output
print(dtf.head())
print("\n--- OUTPUT_12")
# output the sentiment for the first sentence
print(dtf["text"].iloc[0], " --> ", dtf["sentiment"].iloc[0])
# USING SPACY
# Named-Entity Recognition
## call model
ner = spacy.load("en_core_web_lg")
# ## tag text
txt = dtf["text"].iloc[0]
doc = ner(txt)
# ## display result
# spacy.displacy.render(doc, style="ent")
# CAUTION VERY LONG PROCESS, BE PATIENT
## tag text and exctract tags into a list
dtf["tags"] = dtf["text"].apply(lambda x: [(tag.text, tag.label_)
for tag in ner(x).ents])
## utils function to count the element of a list
def utils_lst_count(lst):
dic_counter = collections.Counter()
for x in lst:
dic_counter[x] += 1
dic_counter = collections.OrderedDict(
sorted(dic_counter.items(),
key=lambda x: x[1], reverse=True))
lst_count = [{key: value} for key, value in dic_counter.items()]
return lst_count
## count tags
dtf["tags"] = dtf["tags"].apply(lambda x: utils_lst_count(x))
## utils function create new column for each tag category
def utils_ner_features(lst_dics_tuples, tag):
if len(lst_dics_tuples) > 0:
tag_type = []
for dic_tuples in lst_dics_tuples:
for tuple in dic_tuples:
type, n = tuple[1], dic_tuples[tuple]
tag_type = tag_type + [type]*n
dic_counter = collections.Counter()
for x in tag_type:
dic_counter[x] += 1
return dic_counter[tag]
else:
return 0
## extract features
tags_set = []
for lst in dtf["tags"].tolist():
for dic in lst:
for k in dic.keys():
tags_set.append(k[1])
tags_set = list(set(tags_set))
for feature in tags_set:
dtf["tags_"+feature] = dtf["tags"].apply(lambda x:utils_ner_features(x, feature))
## print result
print("\n--- OUTPUT_13")
print(dtf.head())
# output image
# Unpack the column “tags” we created in the previous code.
# not working
"""
y = "ENTERTAINMENT"
tags_list = dtf[dtf["y"]==y]["tags"].sum()
map_lst = list(map(lambda x: list(x.keys())[0], tags_list))
dtf_tags = pd.DataFrame(map_lst, columns=['tag','type'])
dtf_tags["count"] = 1
dtf_tags = dtf_tags.groupby(['type',
'tag']).count().reset_index().sort_values("count",
ascending=False)
fig, ax = plt.subplots()
fig.suptitle("Top frequent tags", fontsize=12)
# change
# sns.barplot(x="count", y="tag", hue="type", data=dtf_tags.iloc[:top,:], dodge=False, ax=ax)
# sns.barplot(x="count", y="tag", hue="type", data=dtf_tags.iloc[:,:], dodge=False, ax=ax)
# sns.barplot(x="count", y="tag", hue="type", data=dtf_tags.iloc[[0,2], [0,1]], dodge=False, ax=ax)
# exemples
# df.iloc[[0,2], [0,1]]
# print(df.iloc[:,0:4])
sns.barplot(x="count", y="tag", hue="type", data=dtf_tags.iloc[:,0:4], dodge=False, ax=ax)
ax.grid(axis="x")
plt.show()
"""
## predict wit NER
txt = dtf["text"].iloc[0]
entities = ner(txt).ents
## tag text
tagged_txt = txt
for tag in entities:
tagged_txt = re.sub(tag.text, "_".join(tag.text.split()),
tagged_txt)
## show result
print("\n--- OUTPUT_14")
print(tagged_txt)
# Word Frequency
# Show how to calculate unigrams and bigrams frequency taking the sample of Politics news.
print("\n--- OUTPUT_15 BIGRAMS NOT WORKING")
# y = "POLITICS"
# corpus = dtf[dtf["y"] == y]["text_clean"]
# lst_tokens = nltk.tokenize.word_tokenize(corpus.str.cat(sep=" "))
# fig, ax = plt.subplots(nrows=1, ncols=2)
# fig.suptitle("Most frequent words", fontsize=15)
# ## unigrams
# dic_words_freq = nltk.FreqDist(lst_tokens)
# dtf_uni = pd.DataFrame(dic_words_freq.most_common(),
# columns=["Word", "Freq"])
# dtf_uni.set_index("Word").iloc[:top, :].sort_values(by="Freq").plot(
# kind="barh", title="Unigrams", ax=ax[0],
# legend=False).grid(axis='x')
# ax[0].set(ylabel=None)
# ## bigrams
# dic_words_freq = nltk.FreqDist(nltk.ngrams(lst_tokens, 2))
# dtf_bi = pd.DataFrame(dic_words_freq.most_common(),
# columns=["Word", "Freq"])
# dtf_bi["Word"] = dtf_bi["Word"].apply(lambda x: " ".join(
# string for string in x))
# dtf_bi.set_index("Word").iloc[:, :].sort_values(by="Freq").plot(
# kind="barh", title="Bigrams", ax=ax[1],
# legend=False).grid(axis='x')
# ax[1].set(ylabel=None)
# plt.show()
# add word frequency as a feature in your dataframe.
# using 3 n-grams: "box office" (frequent in Entertainment), "republican" (frequent in Politics), "apple" (frequent in Tech).
lst_words = ["box-office", "republican", "apple"]
## count
lst_grams = [len(word.split(" ")) for word in lst_words]
vectorizer = feature_extraction.text.CountVectorizer(
vocabulary=lst_words,
ngram_range=(min(lst_grams), max(lst_grams)))
dtf_X = pd.DataFrame(vectorizer.fit_transform(
dtf["text_clean"]).todense(), columns=lst_words)
## add the new features as columns
dtf = pd.concat([dtf, dtf_X.set_index(dtf.index)], axis=1)
print("\n--- OUTPUT_16 WORD FREQUENCY")
print(dtf.head())
# WORD CLOUD
# print("\n--- OUTPUT_16 WORD CLOUD")
# wc = wordcloud.WordCloud(background_color='black', max_words=100,
# max_font_size=35)
# wc = wc.generate(str(corpus))
# fig = plt.figure(num=1)
# plt.axis('off')
# plt.imshow(wc, cmap=None)
# plt.show()
# WORD VECTORS
# nlp = gensim_api.load("glove-wiki-gigaword-300")
# Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)
# https://github.com/RaRe-Technologies/gensim-data
nlp = gensim_api.load("glove-wiki-gigaword-50")
# We can use this object to map words to vectors.
word = "love"
print("\n--- OUTPUT_16 WORD CLOUD")
print(nlp[word])
print(nlp[word].shape)
"""Now let’s see what are the closest word vectors or, to put in another way, the words that mostly appear in similar contexts. In order to plot the vectors in a two-dimensional space, I need to reduce the dimensions from 300 to 2. I am going to do that with t-distributed Stochastic Neighbor Embedding from Scikit-learn. t-SNE is a tool to visualize high-dimensional data that converts similarities between data points to joint probabilities.
"""
print("\n--- OUTPUT_16 FIND CLOSEST VECTORS")
## find closest vectors
# labels, X, x, y = [], [], [], []
# for t in nlp.most_similar(word, topn=20):
# X.append(nlp[t[0]])
# labels.append(t[0])
# ## reduce dimensions
# pca = manifold.TSNE(perplexity=40, n_components=2, init='pca')
# new_values = pca.fit_transform(X)
# for value in new_values:
# x.append(value[0])
# y.append(value[1])
# ## plot
# fig = plt.figure()
# for i in range(len(x)):
# plt.scatter(x[i], y[i], c="black")
# plt.annotate(labels[i], xy=(x[i], y[i]), xytext=(5, 2),
# textcoords='offset points', ha='right', va='bottom')
# ## add center
# plt.scatter(x=0, y=0, c="red")
# plt.annotate(word, xy=(0, 0), xytext=(5, 2), textcoords='offset points',
#              ha='right', va='bottom')
# Topic Modeling
"""
Let’s see what topics we can extract from Tech news. I need to specify the number of topics the model has to cluster, I am going to try with 3.
"""
# y = "TECH"
# corpus = dtf[dtf["y"] == y]["text_clean"]
# ## pre-process corpus
# lst_corpus = []
# for string in corpus:
# lst_words = string.split()
# lst_grams = [" ".join(lst_words[i:i + 2]) for i in range(0,len(lst_words), 2)]
# lst_corpus.append(lst_grams)
# ## map words to an id
# id2word = gensim.corpora.Dictionary(lst_corpus)
# ## create dictionary word:freq
# dic_corpus = [id2word.doc2bow(word) for word in lst_corpus]
# ## train LDA
# lda_model = gensim.models.ldamodel.LdaModel(corpus=dic_corpus, id2word=id2word, num_topics=3,random_state=123, update_every=1, chunksize=100, passes=10, alpha='auto', per_word_topics=True)
# ## output
# lst_dics = []
# for i in range(0, 3):
# lst_tuples = lda_model.get_topic_terms(i)
# for tupla in lst_tuples:
# lst_dics.append({"topic": i, "id": tupla[0],
# "word": id2word[tupla[0]],
# "weight": tupla[1]})
# dtf_topics = pd.DataFrame(lst_dics,
# columns=['topic', 'id', 'word', 'weight'])
# ## plot
# fig, ax = plt.subplots()
# sns.barplot(y="word", x="weight", hue="topic", data=dtf_topics,
# dodge=False, ax=ax).set_title('Main Topics')
# ax.set(ylabel="", xlabel="Word Importance")
# plt.show()
""" [Conclusion]
This article has been a tutorial to demonstrate how to analyze text data with NLP and extract features for a machine learning model.
I showed how to detect the language the data is in, and how to preprocess and clean text. Then I explained different measures of length, did sentiment analysis with Textblob, and we used SpaCy for named-entity recognition. Finally, I explained the differences between traditional word frequency approaches with Scikit-learn and modern language models using Gensim.
Now you know pretty much all the NLP basics to start working with text data.
"""
| mit |
murder77/electrum | plugins/__init__.py | 4 | 4981 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import electrum
from electrum.i18n import _
descriptions = [
{
'name': 'audio_modem',
'fullname': _('Audio MODEM'),
'description': _('Provides support for air-gapped transaction signing.'),
'requires': [('amodem', 'http://github.com/romanz/amodem/')],
'available_for': ['qt'],
},
{
'name': 'btchipwallet',
'fullname': _('Ledger Wallet'),
'description': _('Provides support for Ledger hardware wallet'),
'requires': [('btchip', 'github.com/ledgerhq/btchip-python')],
'requires_wallet_type': ['btchip'],
'registers_wallet_type': ('hardware', 'btchip', _("Ledger wallet")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'cosigner_pool',
'fullname': _('Cosigner Pool'),
'description': ' '.join([
_("This plugin facilitates the use of multi-signatures wallets."),
_("It sends and receives partially signed transactions from/to your cosigner wallet."),
_("Transactions are encrypted and stored on a remote server.")
]),
'requires_wallet_type': ['2of2', '2of3'],
'available_for': ['qt'],
},
{
'name': 'email_requests',
'fullname': 'Email',
'description': _("Send and receive payment request with an email account"),
'available_for': ['qt'],
},
{
'name': 'exchange_rate',
'fullname': _("Exchange rates"),
'description': _("Exchange rates and currency conversion tools."),
'available_for': ['qt'],
},
{
'name': 'greenaddress_instant',
'fullname': 'GreenAddress instant',
'description': _("Allows validating if your transactions have instant confirmations by GreenAddress"),
'available_for': ['qt'],
},
{
'name':'keepkey',
'fullname': 'KeepKey',
'description': _('Provides support for KeepKey hardware wallet'),
'requires': [('keepkeylib','github.com/keepkey/python-keepkey')],
'requires_wallet_type': ['keepkey'],
'registers_wallet_type': ('hardware', 'keepkey', _("KeepKey wallet")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'labels',
'fullname': _('LabelSync'),
'description': '\n'.join([
_("The new and improved LabelSync plugin. This can sync your labels across multiple Electrum installs by using a remote database to save your data. Labels, transactions ids and addresses are encrypted before they are sent to the remote server."),
_("The label sync's server software is open-source as well and can be found on github.com/maran/electrum-sync-server")
]),
'available_for': ['qt']
},
{
'name': 'plot',
'fullname': 'Plot History',
'description': _("Ability to plot transaction history in graphical mode."),
'requires': [('matplotlib', 'matplotlib')],
'available_for': ['qt'],
},
{
'name':'trezor',
'fullname': 'Trezor Wallet',
'description': _('Provides support for Trezor hardware wallet'),
'requires': [('trezorlib','github.com/trezor/python-trezor')],
'requires_wallet_type': ['trezor'],
'registers_wallet_type': ('hardware', 'trezor', _("Trezor wallet")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'trustedcoin',
'fullname': _('Two Factor Authentication'),
'description': ''.join([
_("This plugin adds two-factor authentication to your wallet."), '<br/>',
_("For more information, visit"),
" <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
]),
'requires_wallet_type': ['2fa'],
'registers_wallet_type': ('twofactor', '2fa', _("Wallet with two-factor authentication")),
'available_for': ['qt', 'cmdline'],
},
{
'name': 'virtualkeyboard',
'fullname': 'Virtual Keyboard',
'description': '%s\n%s' % (_("Add an optional virtual keyboard to the password dialog."), _("Warning: do not use this if it makes you pick a weaker password.")),
'available_for': ['qt'],
}
]
| gpl-3.0 |
0x0all/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 43 | 3343 | """
==========================
FastICA on 2D point clouds
==========================
This example gives a visual comparison, in the feature space, of the results
obtained with two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', linewidths=0, zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
Eigenstate/msmbuilder | msmbuilder/decomposition/__init__.py | 7 | 1228 | from __future__ import absolute_import
from sklearn import decomposition as _decomposition
from .base import MultiSequenceDecompositionMixin
from .ktica import KernelTICA
from .pca import PCA, SparsePCA, MiniBatchSparsePCA
from .sparsetica import SparseTICA
from .ksparsetica import KSparseTICA
from .tica import tICA
class FastICA(MultiSequenceDecompositionMixin, _decomposition.FastICA):
__doc__ = _decomposition.FastICA.__doc__
def summarize(self):
return '\n'.join([
"Independent Component Analysis (ICA)",
"----------",
"Number of components: {n_components}",
"Number of iterations: {n_iter_}",
]).format(**self.__dict__)
class FactorAnalysis(MultiSequenceDecompositionMixin,
_decomposition.FactorAnalysis):
__doc__ = _decomposition.FactorAnalysis.__doc__
def summarize(self):
return '\n'.join([
"FactorAnalysis (FA)",
"----------",
"Number of components: {n_components}",
"Log likelihood: {loglike_}",
"Noise variance: {noise_variance_}",
"Number of iterations: {n_iter_}",
]).format(**self.__dict__)
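# --- Illustrative usage sketch (not part of the original module) -----------
# Thanks to MultiSequenceDecompositionMixin these wrappers accept a *list*
# of 2-D arrays (one per trajectory). The random data below is hypothetical.
def _fastica_example():
    import numpy as np
    trajectories = [np.random.randn(100, 10) for _ in range(3)]
    ica = FastICA(n_components=2)
    ica.fit(trajectories)
    transformed = ica.transform(trajectories)  # list of (100, 2) arrays
    print(ica.summarize())
    return transformed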
| lgpl-2.1 |
allantu/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_() | bsd-3-clause |
mayblue9/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
enigmampc/catalyst | catalyst/utils/calendars/trading_calendar.py | 1 | 29843 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractproperty
from lru import LRU
import warnings
from pandas.tseries.holiday import AbstractHolidayCalendar
from six import with_metaclass
from numpy import searchsorted
import numpy as np
import pandas as pd
from pandas import (
DataFrame,
date_range,
DatetimeIndex,
)
from pandas.tseries.offsets import CustomBusinessDay
from catalyst.utils.calendars._calendar_helpers import (
next_divider_idx,
previous_divider_idx,
is_open,
minutes_to_session_labels,
)
from catalyst.utils.input_validation import (
attrgetter,
coerce,
preprocess,
)
from catalyst.utils.memoize import lazyval
start_default = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end_default = end_base + pd.Timedelta(days=365)
NANOS_IN_MINUTE = 60000000000
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
class TradingCalendar(with_metaclass(ABCMeta)):
"""
A TradingCalendar represents the timing information of a single market
exchange.
The timing information is made up of two parts: sessions, and opens/closes.
A session represents a contiguous set of minutes, and has a label that is
midnight UTC. It is important to note that a session label should not be
considered a specific point in time, and that midnight UTC is just being
used for convenience.
For each session, we store the open and close time in UTC time.
"""
def __init__(self, start=start_default, end=end_default):
# Midnight in UTC for each trading day.
# In pandas 0.18.1, pandas calls into its own code here in a way that
# fires a warning. The calling code in pandas tries to suppress the
# warning, but does so incorrectly, causing it to bubble out here.
# Actually catch and suppress the warning here:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
_all_days = date_range(start, end, freq=self.day, tz='UTC')
# `DatetimeIndex`s of standard opens/closes for each day.
self._opens = days_at_time(_all_days, self.open_time, self.tz,
self.open_offset)
self._closes = days_at_time(
_all_days, self.close_time, self.tz, self.close_offset
)
# `DatetimeIndex`s of nonstandard opens/closes
_special_opens = self._calculate_special_opens(start, end)
_special_closes = self._calculate_special_closes(start, end)
# Overwrite the special opens and closes on top of the standard ones.
_overwrite_special_dates(_all_days, self._opens, _special_opens)
_overwrite_special_dates(_all_days, self._closes, _special_closes)
# In pandas 0.16.1 _opens and _closes will lose their timezone
# information. This looks like it has been resolved in 0.17.1.
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
self.schedule = DataFrame(
index=_all_days,
columns=['market_open', 'market_close'],
data={
'market_open': self._opens,
'market_close': self._closes,
},
dtype='datetime64[ns, UTC]',
)
# Simple cache to avoid recalculating the same minute -> session in
# "next" mode. Analysis of current catalyst code paths show that
# `minute_to_session_label` is often called consecutively with the same
# inputs.
self._minute_to_session_label_cache = LRU(1)
self.market_opens_nanos = self.schedule.market_open.values.\
astype(np.int64)
self.market_closes_nanos = self.schedule.market_close.values.\
astype(np.int64)
self._trading_minutes_nanos = self.all_minutes.values.\
astype(np.int64)
self.first_trading_session = _all_days[0]
self.last_trading_session = _all_days[-1]
self._early_closes = pd.DatetimeIndex(
_special_closes.map(self.minute_to_session_label)
)
@lazyval
def day(self):
return CustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
)
@abstractproperty
def name(self):
raise NotImplementedError()
@abstractproperty
def tz(self):
raise NotImplementedError()
@abstractproperty
def open_time(self):
raise NotImplementedError()
@abstractproperty
def close_time(self):
raise NotImplementedError()
@property
def open_offset(self):
return 0
@property
def close_offset(self):
return 0
@lazyval
def _minutes_per_session(self):
diff = self.schedule.market_close - self.schedule.market_open
diff = diff.astype('timedelta64[m]')
return diff + 1
def minutes_count_for_sessions_in_range(self, start_session, end_session):
"""
Parameters
----------
start_session: pd.Timestamp
The first session.
end_session: pd.Timestamp
The last session.
Returns
-------
int: The total number of minutes for the contiguous chunk of sessions.
between start_session and end_session, inclusive.
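
        Examples
        --------
        Illustrative sketch only -- ``TradingCalendar`` itself is abstract,
        so this assumes ``cal`` is a concrete NYSE-like calendar instance:

        >>> start = pd.Timestamp('2016-01-04', tz='UTC')          # doctest: +SKIP
        >>> end = pd.Timestamp('2016-01-08', tz='UTC')            # doctest: +SKIP
        >>> cal.minutes_count_for_sessions_in_range(start, end)   # doctest: +SKIP
        1950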
"""
return int(self._minutes_per_session[start_session:end_session].sum())
@property
def regular_holidays(self):
"""
Returns
-------
pd.AbstractHolidayCalendar: a calendar containing the regular holidays
for this calendar
"""
return None
@property
def adhoc_holidays(self):
return []
@property
def special_opens(self):
"""
A list of special open times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_opens_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
@property
def special_closes(self):
"""
A list of special close times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_closes_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
# -----
@property
def opens(self):
return self.schedule.market_open
@property
def closes(self):
return self.schedule.market_close
@property
def early_closes(self):
return self._early_closes
def is_session(self, dt):
"""
Given a dt, returns whether it's a valid session label.
Parameters
----------
dt: pd.Timestamp
The dt that is being tested.
Returns
-------
bool
Whether the given dt is a valid session label.
"""
return dt in self.schedule.index
def is_open_on_minute(self, dt):
"""
Given a dt, return whether this exchange is open at the given dt.
Parameters
----------
dt: pd.Timestamp
The dt for which to check if this exchange is open.
Returns
-------
bool
Whether the exchange is open on this dt.
"""
return is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value)
def next_open(self, dt):
"""
Given a dt, returns the next open.
If the given dt happens to be a session open, the next session's open
will be returned.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next open.
Returns
-------
pd.Timestamp
The UTC timestamp of the next open.
"""
idx = next_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz='UTC')
def next_close(self, dt):
"""
Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close.
"""
idx = next_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz='UTC')
def previous_open(self, dt):
"""
Given a dt, returns the previous open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous open.
Returns
-------
pd.Timestamp
            The UTC timestamp of the previous open.
"""
idx = previous_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz='UTC')
def previous_close(self, dt):
"""
Given a dt, returns the previous close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous close.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous close.
"""
idx = previous_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz='UTC')
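    # Illustrative sketch (not part of the original class): how the four
    # open/close lookups above fit together. `cal` is a hypothetical concrete
    # calendar instance and the timestamp is arbitrary.
    #
    #     dt = pd.Timestamp('2016-03-14 15:00', tz='UTC')
    #     cal.previous_open(dt)   # most recent session open strictly before dt
    #     cal.previous_close(dt)  # most recent session close strictly before dt
    #     cal.next_open(dt)       # first session open strictly after dt
    #     cal.next_close(dt)      # first session close strictly after dt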
def next_minute(self, dt):
"""
Given a dt, return the next exchange minute. If the given dt is not
an exchange minute, returns the next exchange open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next exchange minute.
Returns
-------
pd.Timestamp
The next exchange minute.
"""
idx = next_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
def previous_minute(self, dt):
"""
Given a dt, return the previous exchange minute.
Raises KeyError if the given timestamp is not an exchange minute.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous exchange minute.
Returns
-------
pd.Timestamp
The previous exchange minute.
"""
idx = previous_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
def next_session_label(self, session_label):
"""
Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError:
if idx == len(self.schedule.index) - 1:
raise ValueError("There is no next session as this is the end"
" of the exchange calendar.")
else:
raise
def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1]
def minutes_for_session(self, session_label):
"""
Given a session label, return the minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DateTimeIndex
All the minutes for the given session.
"""
return self.minutes_in_range(
start_minute=self.schedule.at[session_label, 'market_open'],
end_minute=self.schedule.at[session_label, 'market_close'],
)
def minutes_window(self, start_dt, count):
start_dt_nanos = start_dt.value
all_minutes_nanos = self._trading_minutes_nanos
start_idx = all_minutes_nanos.searchsorted(start_dt_nanos)
# searchsorted finds the index of the minute **on or after** start_dt.
# If the latter, push back to the prior minute.
if all_minutes_nanos[start_idx] != start_dt_nanos:
start_idx -= 1
if start_idx < 0 or start_idx >= len(all_minutes_nanos):
raise KeyError("Can't start minute window at {}".format(start_dt))
end_idx = start_idx + count
if start_idx > end_idx:
return self.all_minutes[(end_idx + 1):(start_idx + 1)]
else:
return self.all_minutes[start_idx:end_idx]
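    # Illustrative sketch (assumes `cal` is a concrete calendar and `dt` is an
    # exchange minute): a positive count walks forward from `dt`, a negative
    # count walks backward, and `dt` itself is included in both cases.
    #
    #     cal.minutes_window(dt, 5)    # dt plus the following 4 minutes
    #     cal.minutes_window(dt, -5)   # dt plus the preceding 4 minutes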
def sessions_in_range(self, start_session_label, end_session_label):
"""
Given start and end session labels, return all the sessions in that
range, inclusive.
Parameters
----------
start_session_label: pd.Timestamp (midnight UTC)
The label representing the first session of the desired range.
end_session_label: pd.Timestamp (midnight UTC)
The label representing the last session of the desired range.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
return self.all_sessions[
self.all_sessions.slice_indexer(
start_session_label,
end_session_label
)
]
def sessions_window(self, session_label, count):
"""
Given a session label and a window size, returns a list of sessions
of size `count` + 1, that either starts with the given session
(if `count` is positive) or ends with the given session (if `count` is
negative).
Parameters
----------
session_label: pd.Timestamp
The label of the initial session.
count: int
Defines the length and the direction of the window.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
start_idx = self.schedule.index.get_loc(session_label)
end_idx = start_idx + count
return self.all_sessions[
min(start_idx, end_idx):max(start_idx, end_idx) + 1
]
def session_distance(self, start_session_label, end_session_label):
"""
Given a start and end session label, returns the distance between
them. For example, for three consecutive sessions Mon., Tues., and
Wed, `session_distance(Mon, Wed)` would return 2.
Parameters
----------
start_session_label: pd.Timestamp
The label of the start session.
end_session_label: pd.Timestamp
The label of the ending session.
Returns
-------
int
The distance between the two sessions.
"""
start_idx = self.all_sessions.searchsorted(
self.minute_to_session_label(start_session_label)
)
end_idx = self.all_sessions.searchsorted(
self.minute_to_session_label(end_session_label)
)
return abs(end_idx - start_idx)
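    # Illustrative sketch mirroring the docstring above (assumes monday,
    # tuesday and wednesday are consecutive session labels of a hypothetical
    # calendar `cal`):
    #
    #     cal.session_distance(monday, wednesday)   # -> 2
    #     cal.session_distance(wednesday, monday)   # -> 2 (absolute value)
    #     cal.session_distance(monday, monday)      # -> 0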
def minutes_in_range(self, start_minute, end_minute):
"""
Given start and end minutes, return all the calendar minutes
in that range, inclusive.
        The given minutes do not themselves need to be calendar minutes.
Parameters
----------
start_minute: pd.Timestamp
The minute representing the start of the desired range.
end_minute: pd.Timestamp
The minute representing the end of the desired range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
start_idx = searchsorted(self._trading_minutes_nanos,
start_minute.value)
end_idx = searchsorted(self._trading_minutes_nanos,
end_minute.value)
if end_minute.value == self._trading_minutes_nanos[end_idx]:
# if the end minute is a market minute, increase by 1
end_idx += 1
return self.all_minutes[start_idx:end_idx]
def minutes_for_sessions_in_range(self, start_session_label,
end_session_label):
"""
Returns all the minutes for all the sessions from the given start
session label to the given end session label, inclusive.
Parameters
----------
start_session_label: pd.Timestamp
The label of the first session in the range.
end_session_label: pd.Timestamp
The label of the last session in the range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
first_minute, _ = self.open_and_close_for_session(start_session_label)
_, last_minute = self.open_and_close_for_session(end_session_label)
return self.minutes_in_range(first_minute, last_minute)
def open_and_close_for_session(self, session_label):
"""
Returns a tuple of timestamps of the open and close of the session
represented by the given label.
Parameters
----------
session_label: pd.Timestamp
The session whose open and close are desired.
Returns
-------
(Timestamp, Timestamp)
The open and close for the given session.
"""
sched = self.schedule
# `market_open` and `market_close` should be timezone aware, but pandas
# 0.16.1 does not appear to support this:
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
return (
sched.at[session_label, 'market_open'].tz_localize('UTC'),
sched.at[session_label, 'market_close'].tz_localize('UTC'),
)
def session_open(self, session_label):
return self.schedule.at[
session_label,
'market_open'
].tz_localize('UTC')
def session_close(self, session_label):
return self.schedule.at[
session_label,
'market_close'
].tz_localize('UTC')
def session_opens_in_range(self, start_session_label, end_session_label):
return self.schedule.loc[
start_session_label:end_session_label,
'market_open',
].dt.tz_convert('UTC')
def session_closes_in_range(self, start_session_label, end_session_label):
return self.schedule.loc[
start_session_label:end_session_label,
'market_close',
].dt.tz_convert('UTC')
@property
def all_sessions(self):
return self.schedule.index
@property
def first_session(self):
return self.all_sessions[0]
@property
def last_session(self):
return self.all_sessions[-1]
def execution_time_from_open(self, open_dates):
return open_dates
def execution_time_from_close(self, close_dates):
return close_dates
def _all_minutes_with_interval(self, interval):
"""
Returns a DatetimeIndex representing all the minutes in this calendar.
"""
opens_in_ns = \
self._opens.values.astype('datetime64[ns]')
closes_in_ns = \
self._closes.values.astype('datetime64[ns]')
deltas = closes_in_ns - opens_in_ns
nanos_in_interval = interval * NANOS_IN_MINUTE
        # + 1 because we want 390 minutes per standard day, not 389
daily_sizes = (deltas / nanos_in_interval) + 1
num_minutes = np.sum(daily_sizes).astype(np.int64)
# One allocation for the entire thing. This assumes that each day
# represents a contiguous block of minutes.
all_minutes = np.empty(num_minutes, dtype='datetime64[ns]')
idx = 0
for day_idx, size in enumerate(daily_sizes):
# lots of small allocations, but it's fast enough for now.
# size is a np.timedelta64, so we need to int it
size_int = int(size)
all_minutes[idx:(idx + size_int)] = \
np.arange(
opens_in_ns[day_idx],
closes_in_ns[day_idx] + NANOS_IN_MINUTE,
nanos_in_interval)
idx += size_int
return DatetimeIndex(all_minutes).tz_localize("UTC")
@lazyval
def all_minutes(self):
"""
Returns a DatetimeIndex representing all the minutes in this calendar.
"""
return self._all_minutes_with_interval(1)
@preprocess(dt=coerce(pd.Timestamp, attrgetter('value')))
def minute_to_session_label(self, dt, direction="next"):
"""
Given a minute, get the label of its containing session.
Parameters
----------
dt : pd.Timestamp or nanosecond offset
The dt for which to get the containing session.
direction: str
"next" (default) means that if the given dt is not part of a
session, return the label of the next session.
"previous" means that if the given dt is not part of a session,
return the label of the previous session.
"none" means that a KeyError will be raised if the given
dt is not part of a session.
Returns
-------
pd.Timestamp (midnight UTC)
The label of the containing session.
"""
if direction == "next":
try:
return self._minute_to_session_label_cache[dt]
except KeyError:
pass
idx = searchsorted(self.market_closes_nanos, dt)
current_or_next_session = self.schedule.index[idx]
self._minute_to_session_label_cache[dt] = current_or_next_session
if direction == "next":
return current_or_next_session
elif direction == "previous":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, use the previous session
return self.schedule.index[idx - 1]
elif direction == "none":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, blow up
raise ValueError("The given dt is not an exchange minute!")
else:
# invalid direction
raise ValueError("Invalid direction parameter: "
"{0}".format(direction))
return current_or_next_session
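    # Illustrative sketch of the three `direction` modes (assumes `cal` is a
    # concrete Mon-Fri calendar and `sat` is a Saturday minute, i.e. not an
    # exchange minute):
    #
    #     cal.minute_to_session_label(sat)                        # next Monday's label
    #     cal.minute_to_session_label(sat, direction='previous')  # preceding Friday's label
    #     cal.minute_to_session_label(sat, direction='none')      # raises ValueError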
def minute_index_to_session_labels(self, index):
"""
Given a sorted DatetimeIndex of market minutes, return a
DatetimeIndex of the corresponding session labels.
Parameters
----------
index: pd.DatetimeIndex or pd.Series
The ordered list of market minutes we want session labels for.
Returns
-------
pd.DatetimeIndex (UTC)
The list of session labels corresponding to the given minutes.
"""
def minute_to_session_label_nanos(dt_nanos):
return self.minute_to_session_label(dt_nanos).value
return DatetimeIndex(minutes_to_session_labels(
index.values.astype(np.int64),
minute_to_session_label_nanos,
self.market_closes_nanos,
).astype('datetime64[ns]'), tz='UTC')
def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
"""
Union an iterable of pairs of the form (time, calendar)
and an iterable of pairs of the form (time, [dates])
(This is shared logic for computing special opens and special closes.)
"""
_dates = DatetimeIndex([], tz='UTC').union_many(
[
holidays_at_time(calendar, start_date, end_date, time_,
self.tz)
for time_, calendar in calendars
] + [
days_at_time(datetimes, time_, self.tz)
for time_, datetimes in ad_hoc_dates
]
)
return _dates[(_dates >= start_date) & (_dates <= end_date)]
def _calculate_special_opens(self, start, end):
return self._special_dates(
self.special_opens,
self.special_opens_adhoc,
start,
end,
)
def _calculate_special_closes(self, start, end):
return self._special_dates(
self.special_closes,
self.special_closes_adhoc,
start,
end,
)
def days_at_time(days, t, tz, day_offset=0):
"""
Create an index of days at time ``t``, interpreted in timezone ``tz``.
The returned index is localized to UTC.
Parameters
----------
days : DatetimeIndex
An index of dates (represented as midnight).
t : datetime.time
The time to apply as an offset to each day in ``days``.
tz : pytz.timezone
The timezone to use to interpret ``t``.
day_offset : int
The number of days we want to offset @days by
Examples
--------
In the example below, the times switch from 13:45 to 12:45 UTC because
March 13th is the daylight savings transition for US/Eastern. All the
times are still 8:45 when interpreted in US/Eastern.
>>> import pandas as pd; import datetime; import pprint
>>> dts = pd.date_range('2016-03-12', '2016-03-14')
>>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern')
>>> pprint.pprint([str(dt) for dt in dts_at_845])
['2016-03-12 13:45:00+00:00',
'2016-03-13 12:45:00+00:00',
'2016-03-14 12:45:00+00:00']
"""
if len(days) == 0:
return days
# Offset days without tz to avoid timezone issues.
days = DatetimeIndex(days).tz_localize(None)
delta = pd.Timedelta(
days=day_offset,
hours=t.hour,
minutes=t.minute,
seconds=t.second,
)
return (days + delta).tz_localize(tz).tz_convert('UTC')
def holidays_at_time(calendar, start, end, time, tz):
return days_at_time(
calendar.holidays(start, end),
time,
tz=tz,
)
def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
"""
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.normalize())
# -1 indicates that no corresponding entry was found. If any -1s are
    # present, then we have special dates that don't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
opens_or_closes.values[indexer] = special_opens_or_closes.values
class HolidayCalendar(AbstractHolidayCalendar):
def __init__(self, rules):
super(HolidayCalendar, self).__init__(rules=rules)
| apache-2.0 |
vortex-ape/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
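# Illustrative usage sketch (not part of the estimator above): removing
# near-constant boolean features. A Bernoulli feature has variance p * (1 - p),
# so a threshold of .8 * (1 - .8) drops features that take the same value in
# more than 80% of the samples.
#
#     X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
#     selector = VarianceThreshold(threshold=(.8 * (1 - .8)))
#     selector.fit_transform(X)   # the first column (zero in 5 of 6 rows) is removed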
| bsd-3-clause |
mcallaghan/tmv | BasicBrowser/tmv_app/models.py | 1 | 27255 | from django.db import models
from django.contrib.postgres.fields import ArrayField
import scoping, parliament
from django.contrib.auth.models import User
import numpy as np
import random
from scipy.sparse import csr_matrix, coo_matrix
from MulticoreTSNE import MulticoreTSNE as mTSNE
import os
from datetime import timedelta
from django.db.models.functions import Ln
from django.db.models import F
from psqlextra.types import PostgresPartitioningMethod
from psqlextra.models import PostgresPartitionedModel
import architect
from django.db import connection
import re
class MinMaxFloat(models.FloatField):
"""
A float field with a minimum and a maximum
"""
def __init__(self, min_value=None, max_value=None, *args, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(MinMaxFloat, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {'min_value': self.min_value, 'max_value' : self.max_value}
defaults.update(kwargs)
return super(MinMaxFloat, self).formfield(**defaults)
#################################################
## Below are some special model variants for hlda
## method
class HTopic(models.Model):
"""
A model for hierarchical topics
"""
topic = models.AutoField(primary_key=True)
parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True)
title = models.CharField(max_length=80, null=True)
n_docs = models.IntegerField(null=True)
n_words = models.IntegerField(null=True)
scale = models.FloatField(null=True)
run_id = models.IntegerField(null=True, db_index=True)
class HTopicTerm(models.Model):
"""
Links hierarchical topics to terms
"""
topic = models.ForeignKey('HTopic', on_delete=models.CASCADE)
term = models.ForeignKey('Term', on_delete=models.CASCADE)
count = models.IntegerField()
run_id = models.IntegerField(null=True, db_index=True)
#################################################
## Topic, Term and Doc are the three primary models
class Topic(models.Model):
"""
The default topic object. The title is usually set according to the top words
"""
title = models.CharField(max_length=80)
manual_title = models.CharField(max_length=80, null=True)
original_title = models.CharField(max_length=80, null=True)
score = models.FloatField(null=True)
share = models.FloatField(null=True)
growth = models.FloatField(null=True)
run_id = models.ForeignKey('RunStats',db_index=True, on_delete=models.CASCADE)
year = models.IntegerField(null=True)
period = models.ForeignKey('TimePeriod', on_delete=models.SET_NULL, null=True)
primary_dtopic = models.ManyToManyField('DynamicTopic')
top_words = ArrayField(models.TextField(),null=True)
primary_wg = models.IntegerField(null=True)
wg_prop = models.FloatField(null=True)
ipcc_coverage = models.FloatField(null=True)
ipcc_score = models.FloatField(null=True)
ipcc_share = models.FloatField(null=True)
wg_1 = models.FloatField(null=True)
wg_2 = models.FloatField(null=True)
wg_3 = models.FloatField(null=True)
def relevant_words(self, l, n):
# https://www.aclweb.org/anthology/W14-3110
tts = self.topicterm_set.annotate(
share = F('score') / F('alltopic_score'),
rel = l * Ln('score') + (1-l) * Ln('share'),
).filter(rel__isnull=False).order_by('-rel')[:n].values('term__title','rel')
return tts
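    # The `rel` annotation above is the term-relevance measure of Sievert &
    # Shirley (the paper linked above): relevance(w, t) =
    # lambda * log p(w|t) + (1 - lambda) * log(p(w|t) / p(w)),
    # where `score` plays the role of p(w|t) and `share` approximates the
    # lift p(w|t) / p(w). A rough standalone sketch with made-up numbers:
    #
    #     import numpy as np
    #     score, alltopic_score, l = 0.02, 0.05, 0.6
    #     share = score / alltopic_score
    #     rel = l * np.log(score) + (1 - l) * np.log(share)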
def create_wordintrusion(self,user):
real_words = self.topicterm_set.order_by('-score')[:5]
scores = np.array(TopicTerm.objects.filter(topic__run_id=self.run_id).values_list('score', flat=True))
q99 = np.quantile(scores, 0.99)
q50 = np.quantile(scores, 0.5)
terms = set(Term.objects.filter(
topicterm__score__gt=q99,topicterm__topic__run_id=self.run_id
).values_list('pk',flat=True))
bad_terms = Term.objects.filter(
pk__in=terms,
topicterm__score__lt=q50,
topicterm__topic=self
)
if bad_terms.exists():
bad_term = bad_terms[random.randint(0,bad_terms.count()-1)]
else:
bad_term = Term.objects.filter(topicterm__topic=self).order_by('topicterm__score')[0]
word_intrusion = WordIntrusion(
topic=self,
user=user,
intruded_word=bad_term
)
word_intrusion.save()
for w in real_words:
word_intrusion.real_words.add(w.term)
def __unicode__(self):
return str(self.title)
def __str__(self):
return str(self.title)
class WordIntrusion(models.Model):
"""
    Used to assess topic quality: in a given topic, can a user identify the
    intruding word?
"""
topic = models.ForeignKey('Topic', on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
real_words = models.ManyToManyField('Term')
intruded_word = models.ForeignKey('Term', on_delete=models.CASCADE, related_name="intruding_topic")
score = models.IntegerField(null=True)
class TopicIntrusion(models.Model):
"""
    Used to assess topic quality: in a given document, can a user identify the
    intruding topic?
"""
doc = models.ForeignKey('scoping.Doc', on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
real_topics = models.ManyToManyField('Topic')
intruded_topic = models.ForeignKey('Topic', on_delete=models.CASCADE, related_name="intruding_doc")
score = models.IntegerField(null=True)
class DynamicTopic(models.Model):
"""
Holds the title, score and other information about dynamic topic models (dynamic nmf).
todo: is this used only for dynamic nmf?
"""
title = models.CharField(null=True, max_length=80)
score = models.FloatField(null=True)
share = models.FloatField(null=True)
size = models.IntegerField(null=True)
run_id = models.ForeignKey('RunStats', on_delete=models.CASCADE,db_index=True)
top_words = ArrayField(models.TextField(),null=True)
l5ys = models.FloatField(null=True)
l1ys = models.FloatField(null=True)
primary_wg = models.IntegerField(null=True)
ipcc_time_score = models.FloatField(null=True)
ipcc_coverage = models.FloatField(null=True)
ipcc_score = models.FloatField(null=True)
ipcc_share = models.FloatField(null=True)
wg_prop = models.FloatField(null=True)
wg_1 = models.FloatField(null=True)
wg_2 = models.FloatField(null=True)
wg_3 = models.FloatField(null=True)
def __unicode__(self):
return str(self.title)
def __str__(self):
return str(self.title)
class TimePeriod(models.Model):
"""
Model for a general time period (can be related to a parliamentary period with start and end date)
"""
title = models.CharField(null=True, max_length=80)
parlperiod = models.ForeignKey('parliament.ParlPeriod', null=True, on_delete=models.SET_NULL)
n = models.IntegerField()
ys = ArrayField(models.IntegerField(),null=True)
start_date = models.DateField(null=True)
end_date = models.DateField(null=True)
def __str__(self):
return str(self.title)
class TimeDocTotal(models.Model):
"""
Aggregates scores from a :model:`tmv_app.TimePeriod`
"""
period = models.ForeignKey(TimePeriod, on_delete=models.PROTECT)
run = models.ForeignKey('RunStats', on_delete=models.CASCADE)
n_docs = models.IntegerField(null=True)
dt_score = models.FloatField(null=True)
class TimeDTopic(models.Model):
"""
Holds the score of a :model:`tmv_app.DynamicTopic` within a :model:`tmv_app.TimePeriod`
"""
period = models.ForeignKey(TimePeriod, on_delete=models.PROTECT)
dtopic = models.ForeignKey('DynamicTopic', on_delete=models.CASCADE)
score = models.FloatField(default=0)
share = models.FloatField(default=0)
pgrowth = models.FloatField(null=True)
pgrowthn = models.FloatField(null=True)
ipcc_score = models.FloatField(null=True)
ipcc_coverage=models.FloatField(null=True)
ipcc_share = models.FloatField(null=True)
class TopicDTopic(models.Model):
"""
Holds the score of a :model:`tmv_app.Topic` within a :model:`tmv_app.DynamicTopic`
"""
topic = models.ForeignKey('Topic', on_delete=models.CASCADE, null=True)
dynamictopic = models.ForeignKey('DynamicTopic', on_delete=models.CASCADE,null=True)
score = models.FloatField(null=True)
class TopicCorr(models.Model):
"""
Holds the correlation between two :model:`tmv_app.Topic` s
todo: specify which type of correlation?
"""
topic = models.ForeignKey('Topic', on_delete=models.CASCADE,null=True)
topiccorr = models.ForeignKey('Topic', on_delete=models.CASCADE ,null=True, related_name='Topiccorr')
score = models.FloatField(null=True)
ar = models.IntegerField(default=-1)
period = models.ForeignKey('TimePeriod', on_delete=models.SET_NULL, null=True)
run_id = models.IntegerField(db_index=True)
def __unicode__(self):
return str(self.title)
class DynamicTopicCorr(models.Model):
"""
Holds the correlation between two :model:`tmv_app.DynamicTopic` s
"""
topic = models.ForeignKey('DynamicTopic', on_delete=models.CASCADE,null=True)
topiccorr = models.ForeignKey('DynamicTopic', on_delete=models.CASCADE,null=True, related_name='Topiccorr')
score = models.FloatField(null=True)
ar = models.IntegerField(default=-1)
period = models.ForeignKey('TimePeriod', on_delete=models.SET_NULL, null=True)
run_id = models.IntegerField(db_index=True)
def __unicode__(self):
return str(self.title)
class Term(models.Model):
"""
Terms (tokens) of topic models
"""
title = models.CharField(max_length=100, db_index=True)
run_id = models.ManyToManyField('RunStats')
def __unicode__(self):
return str(self.title)
def __str__(self):
return str(self.title)
#################################################
## Docs are all in scoping now!
## todo: think about how to link specific document types to a generalized document model
#################################################
class TopicYear(models.Model):
"""
Holds total scores of topics per year
"""
topic = models.ForeignKey('Topic', on_delete=models.CASCADE,null=True)
PY = models.IntegerField()
score = models.FloatField(null=True)
share = models.FloatField(null=True)
count = models.FloatField(null=True)
run_id = models.IntegerField(db_index=True)
class TopicARScores(models.Model):
"""
    Holds total scores of topics per Assessment period (:model:`scoping.AR`)
todo: could this be replaced by linking the general TimePeriod to AR?
"""
topic = models.ForeignKey('Topic', on_delete=models.CASCADE,null=True)
ar = models.ForeignKey('scoping.AR', on_delete=models.CASCADE,null=True)
score = models.FloatField(null=True)
share = models.FloatField(null=True)
pgrowth = models.FloatField(null=True)
pgrowthn = models.FloatField(null=True)
class TopicTimePeriodScores(models.Model):
"""
Holds scores of a :model:`tmv_app.Topic` from a :model:`tmv_app.TimePeriod`
"""
topic = models.ForeignKey('Topic', on_delete=models.CASCADE, null=True)
period = models.ForeignKey('TimePeriod', on_delete=models.SET_NULL, null=True)
score = models.FloatField(null=True)
share = models.FloatField(null=True)
pgrowth = models.FloatField(null=True)
pgrowthn = models.FloatField(null=True)
class DynamicTopicARScores(models.Model):
"""
Holds scores of a :model:`tmv_app.DynamicTopic` from an Assessment Period (:model:`scoping.AR`)
"""
topic = models.ForeignKey('DynamicTopic', on_delete=models.CASCADE, null=True)
ar = models.ForeignKey('scoping.AR', on_delete=models.SET_NULL, null=True)
score = models.FloatField(null=True)
share = models.FloatField(null=True)
pgrowth = models.FloatField(null=True)
pgrowthn = models.FloatField(null=True)
class DynamicTopicTimePeriodScores(models.Model):
"""
Holds scores of a :model:`tmv_app.DynamicTopic` from a :model:`TimePeriod`
"""
topic = models.ForeignKey('DynamicTopic', on_delete=models.CASCADE,null=True)
period = models.ForeignKey('TimePeriod', on_delete=models.SET_NULL, null=True)
score = models.FloatField(null=True)
share = models.FloatField(null=True)
pgrowth = models.FloatField(null=True)
pgrowthn = models.FloatField(null=True)
#################################################
## Separate topicyear for htopic
class HTopicYear(models.Model):
"""
todo
"""
topic = models.ForeignKey('HTopic', on_delete=models.CASCADE, null=True)
PY = models.IntegerField()
score = models.FloatField()
count = models.FloatField()
run_id = models.IntegerField(db_index=True)
#################################################
## DocTopic and TopicTerm map contain topic scores
## for docs and topics respectively
class DocTopic(PostgresPartitionedModel):
"""
    Relates :model:`scoping.Doc` or objects from parliament (paragraphs, speeches) with :model:`tmv_app.Topic` and holds the corresponding topic scores
"""
class PartitioningMeta:
method = PostgresPartitioningMethod.RANGE
key = ["run_id"]
doc = models.ForeignKey('scoping.Doc', null=True, on_delete=models.SET_NULL)
par = models.ForeignKey('parliament.Paragraph',null=True, on_delete=models.SET_NULL)
ut = models.ForeignKey('parliament.Utterance',null=True, on_delete=models.SET_NULL)
topic = models.ForeignKey('Topic',null=True, on_delete=models.CASCADE)
score = models.FloatField()
scaled_score = models.FloatField()
run_id = models.IntegerField(db_index=True)
class DocDynamicTopic(models.Model):
"""
Relates :model:`scoping.Doc` with :model:`tmv_app.Topic` and holds the corresponding topic score
"""
doc = models.ForeignKey('scoping.Doc', null=True, on_delete=models.SET_NULL)
topic = models.ForeignKey('DynamicTopic',null=True, on_delete=models.CASCADE)
score = models.FloatField()
run_id = models.IntegerField(db_index=True)
class TopicTerm(models.Model):
"""
Relates :model:`tmv_app.Topic` with :model:`tmv_app.Term` and holds the corresponding term score
"""
topic = models.ForeignKey('Topic',null=True, on_delete=models.CASCADE)
term = models.ForeignKey('Term', on_delete=models.SET_NULL, null=True)
PY = models.IntegerField(db_index=True,null=True)
score = models.FloatField()
alltopic_score = models.FloatField(null=True)
run_id = models.IntegerField(db_index=True)
class DynamicTopicTerm(models.Model):
"""
Relates :model:`tmv_app.DynamicTopic` with :model:`tmv_app.Term` and holds the corresponding term score
"""
topic = models.ForeignKey('DynamicTopic', null=True, on_delete=models.CASCADE)
term = models.ForeignKey('Term', on_delete=models.SET_NULL, null=True)
PY = models.IntegerField(db_index=True, null=True)
score = models.FloatField()
run_id = models.IntegerField(db_index=True)
class KFold(models.Model):
"""
Stores information from K-fold model validation (see tasks.py: function k_fold)
"""
model = models.ForeignKey('RunStats', on_delete=models.CASCADE)
K = models.IntegerField()
error = models.FloatField(null=True)
class TermPolarity(models.Model):
"""
    Records the polarity of :model:`tmv_app.Term` (for sentiment analysis using a dictionary approach)
"""
term = models.ForeignKey(Term, on_delete=models.CASCADE)
polarity = models.FloatField(null=True)
POS = models.TextField(null=True, verbose_name="part of speech")
source = models.TextField()
#################################################
## RunStats and Settings....
class RunStats(models.Model):
"""
Hold all meta-information on topic model runs
"""
run_id = models.AutoField(primary_key=True)
##Inputs
ONLINE = "on"
BATCH = "ba"
lda_choices = (
(ONLINE, 'Online'),
(BATCH, 'Batch'),
)
SKLEARN = "sk"
LDA_LIB = "ld"
WARP = "wl"
lda_libs = (
(SKLEARN, "Sklearn"),
(LDA_LIB, "lda"),
(WARP, "warplda")
)
max_features = models.IntegerField(default=0, help_text = 'Maximum number of terms (0 = no limit)')
min_freq = models.IntegerField(default=1, help_text = 'Minimum frequency of terms')
max_df = MinMaxFloat(default=0.95, min_value=0.0, max_value=1.0)
limit = models.IntegerField(null=True, default=0, help_text='Limit model to first x documents (leave as zero for no limit)')
ngram = models.IntegerField(null=True, default=1, help_text='Length of feature n_gram')
db = models.BooleanField(default=True, help_text='Record the results into the database? Or just run the model and record statistics?')
fancy_tokenization = models.BooleanField(default=False, help_text='tokenize so that multiple word keywords remain whole')
K = models.IntegerField(null=True, help_text='Number of topics')
alpha = models.FloatField(null=True, default=0.01, help_text='Concentration parameter of Dirichlet distribution of topics in documents'
' (try higher values in LDA, including > 1). Low (high) values indicate that'
' documents should be composed of few (many) topics. Also called theta.'
' In NMF, this is the regularization term alpha')
beta = models.FloatField(null=True, blank=True, default=None, help_text='Concentration parameter of Dirichlet distribution of words in topics.'
' Low (high) values indicate that topics should be composed of few (many) words.'
' Also called eta. This parameter is not used in NMF')
lda_learning_method = models.CharField(max_length = 2, choices=lda_choices, null=True, default=BATCH, help_text='When using LDA in sklearn, you can choose between batch or online learning')
lda_library = models.CharField(max_length = 2, choices=lda_libs, null=True, default=SKLEARN,help_text = 'you can use sklearn or https://github.com/lda-project/lda for LDA')
top_chain_var = models.FloatField(null=True, default=0.05, help_text='Chain var parameter for dtm')
max_iter = models.IntegerField(null=True, default=200, help_text='Maximum iterations')
rng_seed = models.IntegerField(null=True, help_text="seed for random number generator for stochastic estimation of topic model (blei dtm)")
fulltext = models.BooleanField(default=False, help_text='do analysis on fullText? (dependent on availability)')
citations = models.BooleanField(default=False, help_text='scale term scores by citations?')
# Additional information
language = models.TextField(null=True, help_text='language of the documents that have been analyzed (also used for stopword identification)')
extra_stopwords = ArrayField(models.TextField(), null=True, help_text='list of stopwords that are used additionally to the standard ones')
query = models.ForeignKey('scoping.Query', null=True, on_delete=models.CASCADE, help_text='relation to the scoping search object')
psearch = models.ForeignKey('parliament.Search',null=True, on_delete=models.CASCADE, help_text='relation to the parliamentary search object')
## Progress
process_id = models.IntegerField(null=True)
start = models.DateTimeField(auto_now_add=True)
batch_count = models.IntegerField(default=0)
last_update = models.DateTimeField(auto_now_add=True)
topic_titles_current = models.NullBooleanField(default=False)
topic_scores_current = models.NullBooleanField(default=False)
topic_year_scores_current = models.NullBooleanField(default=False)
## Time spent
runtime = models.DurationField(null=True)
nmf_time = models.FloatField(default=0)
tfidf_time = models.FloatField(default=0)
db_time = models.FloatField(default=0)
status_choices = (
(0,'Not Started'),
(1,'Running'),
(2,'Interrupted'),
(3,'Finished')
)
status = models.IntegerField(
choices = status_choices,
default = 0,
help_text='status of the model execution'
)
parent_run_id = models.IntegerField(null=True, help_text='')
docs_seen = models.IntegerField(null=True)
notes = models.TextField(null=True)
LDA = 'LD'
HLDA = 'HL'
DTM = 'DT'
NMF = 'NM'
BDT = 'BD'
METHOD_CHOICES = (
(LDA, 'lda'),
(HLDA, 'hlda'),
(DTM, 'dnmf'),
(NMF,'nmf'),
(BDT,'BleiDTM')
)
method = models.CharField(
max_length=2,
choices=METHOD_CHOICES,
default=NMF,
)
error = models.FloatField(null=True, default = 0)
coherence = models.FloatField(null=True)
errortype = models.TextField(null=True)
exclusivity = models.FloatField(null=True)
empty_topics = models.IntegerField(null=True)
iterations = models.IntegerField(null=True)
max_topics = models.IntegerField(null=True)
term_count = models.IntegerField(null=True)
periods = models.ManyToManyField('TimePeriod')
doc_topic_scaled_score = models.BooleanField(default=False)
    dt_threshold = models.FloatField(default=0.0005)
    dt_threshold_scaled = models.FloatField(default=0.01)
    dyn_win_threshold = models.FloatField(default=0.1)
def check_partitions(s):
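        """
        Ensure the range-partitioned tmv_app_doctopic table has a partition
        covering the next run_id. If the newest partition already spans the
        upcoming run_id but holds more than 10 million rows, it is detached
        and re-attached with a range ending at the new run_id and a fresh
        partition is created; if the new run_id falls outside all existing
        partitions, a partition covering the next 10000 run_ids is added.
        """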
run_id=RunStats.objects.last().pk+1
sql = """
select relname, pg_get_expr(relpartbound, oid), substring(pg_get_expr(relpartbound, oid) from '\d+')::int AS s
from pg_class
WHERE relispartition and relname~'tmv_app_doctopic'
AND pg_get_expr(relpartbound, oid)~'FOR VALUES' ORDER BY s DESC LIMIT 1;
"""
with connection.cursor() as cursor:
cursor.execute(sql)
row = cursor.fetchone()
pname = row[0]
vrange = re.findall('\d+',row[1])
sql = f"SELECT COUNT(id) FROM {row[0]}"
cursor.execute(sql)
row = cursor.fetchone()
if run_id < int(vrange[1]):
if row[0] > 10000000:
# Alter current partition
sql = f"""BEGIN TRANSACTION;
ALTER TABLE tmv_app_doctopic DETACH PARTITION {pname};
ALTER TABLE tmv_app_doctopic ATTACH PARTITION {pname}
FOR VALUES FROM ({vrange[0]}) TO ({run_id});
COMMIT TRANSACTION;"""
cursor.execute(sql)
else:
return
# Create new partition
connection.schema_editor().add_range_partition(
model=DocTopic,
name=f"pt_{run_id}",
from_values=run_id,
to_values=run_id+10000
)
else:
connection.schema_editor().add_range_partition(
model=DocTopic,
name=f"pt_{run_id}",
from_values=run_id,
to_values=run_id+10000
)
def save(self, *args, **kwargs):
self.check_partitions()
if not self.parent_run_id:
self.parent_run_id=self.run_id
super(RunStats, self).save(*args, **kwargs)
def dt_matrix(self, path, s_size=0, force_overwrite=False):
'''
Return a sparse doctopic matrix and its row and column ids
'''
# see if the required objects already exist
mpath = f"{path}/run_{self.pk}_s_{s_size}_m.npy"
rpath = f"{path}/run_{self.pk}_s_{s_size}_r_ind.npy"
cpath = f"{path}/run_{self.pk}_s_{s_size}_c_ind.npy"
if os.path.exists(mpath):
m = np.load(mpath,allow_pickle=True)[()]
if os.path.exists(rpath):
r_ind = np.load(rpath,allow_pickle=True)
if os.path.exists(cpath):
c_ind = np.load(cpath, allow_pickle=True)
if not force_overwrite:
print("We've already calculated the required matrices!")
return(m,c_ind,r_ind)
if self.method=="DT":
dts = DocDynamicTopic.objects
else:
dts = DocTopic.objects
if self.query:
doc_id_var = 'doc__id'
elif self.psearch:
if self.psearch.search_object_type==parliament.models.Search.PARAGRAPH:
doc_id_var = 'par__id'
elif self.psearch.search_object_type==parliament.models.Search.UTTERANCE:
doc_id_var = 'ut__id'
else:
print("I don't know what type of document I have...")
return
db_matrix = dts.filter(
run_id=self.pk,
score__gt=self.dt_threshold
)
docs = set(db_matrix.values_list(doc_id_var,flat=True))
if s_size >0:
s_docs = random.sample(docs,s_size)
db_matrix = dts.filter(
                run_id=self.pk,
score__gt=0.01,
doc__id__in=s_docs
)
vs = list(db_matrix.values('score',doc_id_var,'topic_id'))
c_ind = np.array(list(set(db_matrix.values_list('topic_id',flat=True).order_by(doc_id_var))))
r_ind = np.array(list(set(db_matrix.values_list(doc_id_var,flat=True).order_by(doc_id_var))))
d = [x['score'] for x in vs]
c = [int(np.where(c_ind==x['topic_id'])[0]) for x in vs]
r = [int(np.where(r_ind==x[doc_id_var])[0]) for x in vs]
m = coo_matrix((d,(r,c)),shape=(len(r_ind),len(c_ind)))
np.save(mpath, m)
np.save(rpath, r_ind)
np.save(cpath, c_ind)
return(m,c_ind,r_ind)
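    # Sketch of the sparse-matrix construction used in dt_matrix above: a
    # coo_matrix is built from parallel lists of scores and (row, column)
    # positions, where rows index documents and columns index topics. The
    # numbers below are made up.
    #
    #     from scipy.sparse import coo_matrix
    #     d = [0.4, 0.6, 0.9]   # doc-topic scores
    #     r = [0, 0, 1]         # row (document) index of each score
    #     c = [0, 1, 1]         # column (topic) index of each score
    #     m = coo_matrix((d, (r, c)), shape=(2, 2))
    #     m.toarray()           # [[0.4, 0.6], [0.0, 0.9]]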
def calculate_tsne(self, path, p, s_size=0, force_overwrite=False):
"""
Function applied to RunStats object to calculate dimensionality reduction using TSNE
:param path: Results path
:param p:
:param s_size:
:param force_overwrite: (default: False) Overrides already existing results
:return:
"""
m, c_ind, r_ind = self.dt_matrix(path, s_size)
results_path = f"{path}/run_{self.pk}_s_{s_size}_p_{p}_results.npy"
if os.path.exists(results_path):
tsne_results = np.load(results_path, allow_pickle=True)
if not force_overwrite:
print("We've already calculated the tsne positions")
return tsne_results, r_ind
tsne = mTSNE(n_components=2, verbose=0, perplexity=p,n_jobs=4)
tsne_results = tsne.fit_transform(m.toarray())
np.save(results_path, tsne_results)
return tsne_results, r_ind
class Settings(models.Model):
"""
todo: what is this?
    used in utils/db.py and BasicBrowser/db.py
"""
run_id = models.IntegerField()
doc_topic_score_threshold = models.FloatField()
doc_topic_scaled_score = models.BooleanField()
| gpl-3.0 |
petebachant/scipy | scipy/signal/windows.py | 32 | 53971 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
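# Editorial sketch (not part of this module): nuttall, blackmanharris and
# flattop above are all instances of the same generalized cosine window,
# w[n] = sum_k (-1)**k * a[k] * cos(2*pi*k*n / (M - 1)), differing only in
# their coefficient lists `a`. A minimal standalone version:
#
#     import numpy as np
#
#     def general_cosine_window(M, a):
#         n = np.arange(0, M)
#         fac = n * 2 * np.pi / (M - 1.0)
#         return sum((-1) ** k * ak * np.cos(k * fac) for k, ak in enumerate(a))
#
#     # e.g. general_cosine_window(51, [0.35875, 0.48829, 0.14128, 0.01168])
#     # reproduces blackmanharris(51, sym=True)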
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
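# A minimal illustrative check (hypothetical helper, not part of the original
# module): for even M, the periodic form produced by the
# "M = M + 1 ... w = w[:-1]" pattern used throughout this file is simply the
# symmetric window of length M + 1 with its last sample dropped.
def _periodic_hann_example(M=64):
    """Return True if hann(M, sym=False) equals hann(M + 1, sym=True)[:-1]."""
    return np.allclose(hann(M, sym=False), hann(M + 1, sym=True)[:-1])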
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
if not sym and not odd:
w = w[:-1]
return w
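# A minimal illustrative check (hypothetical helper, not part of the original
# module) of the two limiting cases documented above: alpha <= 0 gives a
# rectangular (boxcar) window and alpha >= 1 reduces to a Hann window.
def _tukey_limits_example(M=51):
    """Return True if the alpha = 0 and alpha = 1 limits of tukey hold."""
    is_rectangular = np.allclose(tukey(M, alpha=0.0), np.ones(M))
    is_hann = np.allclose(tukey(M, alpha=1.0), hann(M))
    return is_rectangular and is_hann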
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
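# A minimal illustrative sketch (hypothetical helper, not part of the original
# module) of the beta table in the docstring above: it returns the maximum
# pointwise deviation of kaiser(M, 5) from a Hamming window and of
# kaiser(M, 8.6) from a Blackman window, assuming the `blackman` function
# defined earlier in this module. The deviations are small but not zero --
# the table describes similarity, not equality.
def _kaiser_shape_example(M=51):
    """Return the max deviations of kaiser(M, 5) from hamming(M) and
    kaiser(M, 8.6) from blackman(M)."""
    dev_hamming = np.max(np.abs(kaiser(M, 5.0) - hamming(M)))
    dev_blackman = np.max(np.abs(kaiser(M, 8.6) - blackman(M)))
    return dev_hamming, dev_blackman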
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two `M` are the fastest to generate, and prime number `M` are
the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
if not sym and not odd:
win = win[:-1]
return win
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
if not sym and not odd:
w = w[:-1]
return w
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
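# A minimal illustrative sketch (hypothetical helper, not part of the original
# module) of how the two lookup tables built above fit together: every alias
# in a key tuple maps to the same window function, and _needs_param collects
# the names that cannot be requested by a bare string in get_window.
def _alias_lookup_example():
    """Return the function behind the 'gauss' alias and whether it needs a parameter."""
    winfunc = _win_equiv['gauss']           # resolves to the gaussian window
    needs_extra = 'gauss' in _needs_param   # True: gaussian requires `std`
    return winfunc, needs_extra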
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window ready to use with ifftshift
and be multiplied by the result of an fft (SEE ALSO fftfreq).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
exponential (needs decay scale), tukey (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
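# A minimal illustrative sketch (hypothetical helper, not part of the original
# module) of the three dispatch paths above. Note that fftbins=True (the
# default) asks for the periodic form, i.e. sym=False in the window functions.
def _get_window_dispatch_example(Nx=9):
    """Return True if the tuple, float and string spellings behave as documented."""
    by_tuple = get_window(('kaiser', 4.0), Nx)   # tuple: name plus parameters
    by_float = get_window(4.0, Nx)               # bare float: kaiser beta
    direct = kaiser(Nx, 4.0, sym=False)          # what both resolve to
    plain = get_window('hamming', Nx)            # parameter-free window by name
    return (np.allclose(by_tuple, by_float) and
            np.allclose(by_tuple, direct) and
            np.allclose(plain, hamming(Nx, sym=False)))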
| bsd-3-clause |
ephes/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
abhi252/GloVeGraphs | LINE Evaluation/evaluateLine.py | 1 | 1674 | import os
import sys
from subprocess import call
from sklearn.cluster import KMeans
from sklearn.metrics import normalized_mutual_info_score
def evaluate(filename):
#read ground truth
communities = {}
doc = open(filename, "r")
for line in doc:
try:
a = line.split()
communities[float(a[0])] = float(a[1])
except ValueError:
continue
doc.close()
noof_comm = len(set(communities.values()))
#generate embeddings
graphfname = filename.replace("community", "network")
print "Generating embeddings for " + graphfname + "..."
call(["cp", graphfname, "tmpi.txt"])
call(["sh", "sample.sh"])
#read and cluster embeddings
print "Clustering embeddings of " + graphfname + "..."
vectors = []
doc = open("vectors.txt","r")
for line in doc:
a = line.split()
tmp = []
for l in a:
tmp.append(float(l))
vectors.append(tmp)
doc.close()
del vectors[0] #remove summary line
ordered = sorted(vectors, key=lambda x: x[0])
for o in ordered:
del o[0] #remove node id
km = KMeans(n_clusters=noof_comm).fit(ordered)
#evaluating
comm_labels = []
for k in sorted(communities.keys()):
comm_labels.append(communities[k])
return normalized_mutual_info_score(comm_labels, km.labels_)
def main():
if len(sys.argv) < 2:
print "Please provide directory of graphs... Exiting..."
return
op = open("../../final/GloVeGraphs/LINE Evaluation/results.txt", "a")
dirname = sys.argv[1]
filenames = os.listdir(dirname)
nmi = []
for filename in filenames:
if filename.startswith("community"):
nmi.append(evaluate(dirname + "/" + filename))
print nmi
score = sum(nmi)/len(nmi)
op.write(dirname + " " + str(score) + "\n")
op.close()
main()
| gpl-3.0 |
arahuja/scikit-learn | sklearn/metrics/pairwise.py | 13 | 41710 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
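# A minimal illustrative check (hypothetical helper, not part of the original
# module) of the expansion used above,
# dist(x, y)^2 = dot(x, x) - 2 * dot(x, y) + dot(y, y),
# against a brute-force pairwise computation.
def _euclidean_expansion_example(seed=0):
    """Return True if euclidean_distances matches the brute-force distances."""
    rng = np.random.RandomState(seed)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    brute = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    return np.allclose(euclidean_distances(X, Y), brute)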
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
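# A minimal illustrative check (hypothetical helper, not part of the original
# module): the chunked computation above is meant to reproduce the naive
# argmin/min over the full distance matrix without materialising it.
def _argmin_min_consistency_example(seed=0):
    """Return True if the chunked and naive euclidean computations agree."""
    rng = np.random.RandomState(seed)
    X = rng.rand(20, 4)
    Y = rng.rand(15, 4)
    D = euclidean_distances(X, Y)
    indices, values = pairwise_distances_argmin_min(X, Y, metric='euclidean')
    return (np.array_equal(indices, D.argmin(axis=1)) and
            np.allclose(values, D.min(axis=1)))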
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
    ----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
    -------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
    --------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
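# A minimal illustrative check (hypothetical helper, not part of the original
# module) of the Notes section above: on unit-normalized rows the cosine
# distance equals half the squared euclidean distance.
def _paired_cosine_note_example(seed=0):
    """Return True if the relation stated in the Notes section holds."""
    rng = np.random.RandomState(seed)
    X = rng.rand(6, 4)
    Y = rng.rand(6, 4)
    lhs = paired_cosine_distances(X, Y)
    rhs = .5 * paired_euclidean_distances(normalize(X), normalize(Y)) ** 2
    return np.allclose(lhs, rhs)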
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    coef0 : int, default 1
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
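# A minimal illustrative check (hypothetical helper, not part of the original
# module) of the formula in the docstring above,
# K(X, Y) = (gamma <X, Y> + coef0)^degree, written out in plain numpy.
def _polynomial_kernel_example(seed=0):
    """Return True if polynomial_kernel matches the explicit formula."""
    rng = np.random.RandomState(seed)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    gamma, coef0, degree = .5, 1., 3
    explicit = (gamma * np.dot(X, Y.T) + coef0) ** degree
    computed = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0)
    return np.allclose(computed, explicit)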
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    coef0 : int, default 1
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
return K
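def _cosine_similarity_sketch():
    # Sketch of the docstring note above (illustrative helper, not part of the
    # public API): on rows scaled to unit L2 norm, cosine_similarity and
    # linear_kernel coincide.
    X = np.array([[3., 4.], [1., 0.]])
    X_unit = X / np.linalg.norm(X, axis=1, keepdims=True)
    assert np.allclose(cosine_similarity(X), linear_kernel(X_unit, X_unit))
    return cosine_similarity(X)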
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
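def _chi2_kernel_sketch():
    # Sketch of the chi-squared kernels (illustrative helper, not part of the
    # public API): they expect dense, non-negative data such as normalized
    # histograms; sparse input or negative values raise.
    hists = np.array([[0.2, 0.5, 0.3],
                      [0.1, 0.1, 0.8]])
    K_add = additive_chi2_kernel(hists)    # <= 0 everywhere, 0 on the diagonal
    K_exp = chi2_kernel(hists, gamma=1.)   # exp(gamma * K_add), values in (0, 1]
    return K_add, K_exp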
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
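def _pairwise_distances_sketch():
    # Usage sketch for pairwise_distances (illustrative helper, not part of
    # the public API): scikit-learn metrics, scipy metric names and callables
    # are all accepted; a callable receives one row of X and one row of Y at
    # a time.
    X = np.array([[0., 0.], [3., 4.]])
    D_euclidean = pairwise_distances(X)                      # D[0, 1] == 5.0
    D_manhattan = pairwise_distances(X, metric='manhattan')  # D[0, 1] == 7.0
    D_callable = pairwise_distances(X, metric=lambda u, v: np.abs(u - v).max())
    return D_euclidean, D_manhattan, D_callable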
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the functions they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'sigmoid', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
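def _pairwise_kernels_sketch():
    # Usage sketch for pairwise_kernels (illustrative helper, not part of the
    # public API): with filter_params=True, keyword arguments that the chosen
    # kernel does not take are dropped via KERNEL_PARAMS instead of raising.
    X = np.array([[0., 1.], [1., 1.], [2., 0.]])
    K_rbf = pairwise_kernels(X, metric='rbf', gamma=0.5)
    K_lin = pairwise_kernels(X, metric='linear', filter_params=True, gamma=0.5)
    return K_rbf, K_lin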
| bsd-3-clause |
kdebrab/pandas | pandas/core/base.py | 1 | 40146 | """
Base and utility classes for pandas objects.
"""
import warnings
import textwrap
from pandas import compat
from pandas.compat import builtins
import numpy as np
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.core.dtypes.common import (
is_datetimelike,
is_object_dtype,
is_list_like,
is_scalar,
is_extension_type,
is_extension_array_dtype)
from pandas.util._validators import validate_bool_kwarg
from pandas.errors import AbstractMethodError
from pandas.core import common as com, algorithms
import pandas.core.nanops as nanops
import pandas._libs.lib as lib
from pandas.compat.numpy import function as nv
from pandas.compat import PYPY
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.accessor import DirNamesMixin
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin, DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
    call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (getattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
getattr(self, key, None) is not None)):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
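def _no_new_attributes_sketch():
    # Minimal sketch of how NoNewAttributesMixin behaves (hypothetical helper,
    # for illustration only; not part of pandas): attributes created before
    # _freeze() can still be rebound, unknown ones raise AttributeError.
    class Frozen(NoNewAttributesMixin):
        def __init__(self):
            self.allowed = 1          # created before freezing
            self._freeze()
    obj = Frozen()
    obj.allowed = 2                   # fine: the attribute already exists
    try:
        obj.brand_new = 3             # blocked by __setattr__ above
    except AttributeError:
        pass
    return obj.allowed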
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.max,
builtins.min: np.min
}
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
builtins.min: 'min',
np.all: 'all',
np.any: 'any',
np.sum: 'sum',
np.mean: 'mean',
np.prod: 'prod',
np.std: 'std',
np.var: 'var',
np.median: 'median',
np.max: 'max',
np.min: 'min',
np.cumprod: 'cumprod',
np.cumsum: 'cumsum'
}
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) {selection} already selected'
.format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
        None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif isinstance(obj, ABCDataFrame) and \
k not in obj.columns:
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries)
for r in compat.itervalues(result))
def is_any_frame():
            # return a boolean if we have *any* nested frames
return any(isinstance(r, ABCDataFrame)
for r in compat.itervalues(result))
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com._get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
        if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
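def _aggregation_dispatch_sketch():
    # Usage-level sketch of the dispatch above (hypothetical helper, for
    # illustration only): strings go through _try_aggregate_string_function,
    # numpy callables are mapped onto cython implementations via
    # _cython_table, and lists/dicts are fanned out by _aggregate.
    from pandas import Series
    s = Series([1, 2, 3])
    by_name = s.agg('sum')            # string -> looked up on the object
    by_numpy = s.agg(np.sum)          # np.sum -> 'sum' via _cython_table
    by_list = s.agg(['min', 'max'])   # list -> one result per aggregation
    return by_name, by_numpy, by_list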
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="return the transpose, which is by "
"definition self")
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
""" return the first element of the underlying data as a python
scalar
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.values._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def max(self):
"""
Return the maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
return a ndarray of the maximum argument indexer
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
"""
Return the minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
return a ndarray of the minimum argument indexer
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com._maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return iter(self.tolist())
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return isna(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def _map_values(self, mapper, na_action=None):
"""An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if isinstance(mapper, dict):
if hasattr(mapper, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
                # convert to a Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from pandas import Series
mapper = Series(mapper)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_1d(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.astype(object)
values = getattr(values, 'values', values)
if na_action == 'ignore':
def map_f(values, f):
return lib.map_infer_mask(values, f,
isna(values).view(np.uint8))
else:
map_f = lib.map_infer
# mapper is a function
new_values = map_f(values, mapper)
return new_values
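    # Public-API view of _map_values (illustrative sketch): Series.map funnels
    # into it, a dict becomes an indexed lookup and na_action='ignore' keeps
    # NaN away from the mapping function, e.g.
    #   >>> s = pd.Series(['cat', 'dog', np.nan])
    #   >>> s.map({'cat': 'kitten', 'dog': 'puppy'})       # NaN stays NaN
    #   >>> s.map(lambda x: x.upper(), na_action='ignore')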
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: number of non-NA elements in a Series
DataFrame.count: number of non-NA elements in a DataFrame
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
        categorical variable; instead of counting unique
        occurrences of values, divide the index into the specified
        number of half-open bins.
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
from pandas.core.algorithms import value_counts
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
return result
def unique(self):
values = self._values
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.algorithms import unique1d
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
"""
return self.nunique() == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.values)
return v
@Substitution(
values='', order='', size_hint='',
sort=textwrap.dedent("""\
sort : boolean, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""))
@Appender(algorithms._shared_docs['factorize'])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
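    # Sketch of factorize on an Index (illustrative): integer codes plus the
    # unique values they index into, e.g.
    #   >>> labels, uniques = pd.Index(['b', 'b', 'a', 'c', 'b']).factorize()
    #   >>> labels
    #   array([0, 0, 1, 2, 0])
    #   >>> uniques
    #   Index(['b', 'a', 'c'], dtype='object')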
_shared_docs['searchsorted'] = (
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
    >>> x = pd.Categorical(['apple', 'bread', 'bread',
    ...                     'cheese', 'milk'], ordered=True)
    >>> x
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'], side='right')
array([3])
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
def drop_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(duplicated(self, keep=keep),
index=self.index).__finalize__(self)
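    # Sketch of the two methods above (illustrative): duplicated() flags
    # repeated values, drop_duplicates() keeps the first occurrence by
    # default, e.g.
    #   >>> s = pd.Series([1, 2, 2, 3])
    #   >>> s.duplicated().tolist()
    #   [False, False, True, False]
    #   >>> s.drop_duplicates().tolist()
    #   [1, 2, 3]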
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/dtypes/test_concat.py | 4 | 2065 | # -*- coding: utf-8 -*-
import pytest
import pandas.core.dtypes.concat as _concat
from pandas import (
Index, DatetimeIndex, PeriodIndex, TimedeltaIndex, Series, Period)
@pytest.mark.parametrize('to_concat, expected', [
# int/float/str
([['a'], [1, 2]], ['i', 'object']),
([[3, 4], [1, 2]], ['i']),
([[3, 4], [1, 2.1]], ['i', 'f']),
# datetimelike
([DatetimeIndex(['2011-01-01']), DatetimeIndex(['2011-01-02'])],
['datetime']),
([TimedeltaIndex(['1 days']), TimedeltaIndex(['2 days'])],
['timedelta']),
# datetimelike object
([DatetimeIndex(['2011-01-01']),
DatetimeIndex(['2011-01-02'], tz='US/Eastern')],
['datetime', 'datetime64[ns, US/Eastern]']),
([DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
DatetimeIndex(['2011-01-02'], tz='US/Eastern')],
['datetime64[ns, Asia/Tokyo]', 'datetime64[ns, US/Eastern]']),
([TimedeltaIndex(['1 days']), TimedeltaIndex(['2 hours'])],
['timedelta']),
([DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
TimedeltaIndex(['1 days'])],
['datetime64[ns, Asia/Tokyo]', 'timedelta'])])
@pytest.mark.parametrize('klass', [Index, Series])
def test_get_dtype_kinds(klass, to_concat, expected):
to_concat_klass = [klass(c) for c in to_concat]
result = _concat.get_dtype_kinds(to_concat_klass)
assert result == set(expected)
@pytest.mark.parametrize('to_concat, expected', [
# because we don't have Period dtype (yet),
# Series results in object dtype
([PeriodIndex(['2011-01'], freq='M'),
PeriodIndex(['2011-01'], freq='M')], ['period[M]']),
([Series([Period('2011-01', freq='M')]),
Series([Period('2011-02', freq='M')])], ['object']),
([PeriodIndex(['2011-01'], freq='M'),
PeriodIndex(['2011-01'], freq='D')], ['period[M]', 'period[D]']),
([Series([Period('2011-01', freq='M')]),
Series([Period('2011-02', freq='D')])], ['object'])])
def test_get_dtype_kinds_period(to_concat, expected):
result = _concat.get_dtype_kinds(to_concat)
assert result == set(expected)
| bsd-3-clause |
lounick/task_scheduling | task_scheduling/tlpp_problem.py | 1 | 7215 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, lounick and decabyte
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of task_scheduling nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Two level planning problem (TLPP) solver.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import numpy as np
import gurobipy
def tlpp_solver(cost, salesmen=1, min_cities=None, max_cities=None, **kwargs):
"""
Multi-depot multiple traveling salesmen MILP solver for multi-robot task scheduling using the Gurobi MILP optimiser.
    Points (in the cost matrix) must be given in a specific order. The first point is the extraction point for the
robots. The next points are the robot positions (depots) and the final points are the tasks to be visited.
    :return: Tuple with the routes for each salesman from its depot, the objective value, and the Gurobi model object.
:param cost: Cost matrix for travelling from point to point.
:param salesmen: Number of salesmen taking part in the solution.
:param min_cities: Optional parameter of minimum cities to be visited by each salesman.
:param max_cities: Optional parameter of maximum cities to be visited by each salesman.
"""
n = cost.shape[0]
depots = salesmen + 1
if min_cities is None:
K = 0
else:
K = min_cities
if max_cities is None:
L = n
else:
L = max_cities
x = kwargs['areas']
m = gurobipy.Model()
e_vars = {}
for i in range(n):
for j in range(n):
e_vars[i, j] = m.addVar(obj=cost[i, j], vtype=gurobipy.GRB.BINARY, name='e_' + str(i) + '_' + str(j))
m.update()
u_vars = {}
for i in range(n):
u_vars[i] = m.addVar(vtype=gurobipy.GRB.INTEGER, name='u_' + str(i))
m.update()
for i in range(n):
e_vars[i, i].ub = 0
m.update()
# From each depot to other nodes. Notice that in the final depot no-one exits.
for i in range(depots):
if i == 0:
m.addConstr(gurobipy.quicksum(e_vars[i, j] for j in range(depots, n)) == 0)
else:
# Only one salesman allowed per depot (one robot per position)
m.addConstr(gurobipy.quicksum(e_vars[i, j] for j in range(depots, n)) == 1)
m.update()
# From each node to the final depot. No-one returns to his original positions. They are forced to go to extraction.
for j in range(depots):
if j == 0:
m.addConstr(gurobipy.quicksum(e_vars[i, j] for i in range(depots, n)) == depots - 1)
else:
m.addConstr(gurobipy.quicksum(e_vars[i, j] for i in range(depots, n)) == 0)
m.update()
# For the task points someone enters
for j in range(depots, n):
m.addConstr(gurobipy.quicksum(e_vars[i, j] for i in range(n)) == 1)
m.update()
# For the task points someone exits
for i in range(depots, n):
m.addConstr(gurobipy.quicksum(e_vars[i, j] for j in range(n)) == 1)
m.update()
# Precedence constraint
for i in range(depots, n - 1):
m.addConstr(x[i] - e_vars[i, i + 1] - e_vars[i + 1, i] <= 0)
# m.addConstr(x[i] - e_vars[i, i + 1] <= 0)
# m.addConstr(u_vars[i] - u_vars[i+1] <= 1)
# m.addConstr(u_vars[i] - u_vars[i + 1] >= -1)
for i in range(depots, n):
m.addConstr(
u_vars[i] + (L - 2) * gurobipy.quicksum(e_vars[k, i] for k in range(depots)) -
gurobipy.quicksum(e_vars[i, k] for k in range(depots)) <= (L - 1)
)
m.update()
for i in range(depots, n):
m.addConstr(
u_vars[i] + gurobipy.quicksum(e_vars[k, i] for k in range(depots)) +
(2 - K) * gurobipy.quicksum(e_vars[i, k] for k in range(depots)) >= 2
)
m.update()
for k in range(depots):
for i in range(depots, n):
m.addConstr(e_vars[k, i] + e_vars[i, k] <= 1)
m.update()
for i in range(depots, n):
for j in range(depots, n):
if i != j:
m.addConstr(u_vars[i] - u_vars[j] + L * e_vars[i, j] + (L - 2) * e_vars[j, i] <= L - 1)
m.update()
m._vars = e_vars
m._uvars = u_vars
m.params.OutputFlag = int(kwargs.get('output_flag', 0))
m.params.TimeLimit = float(kwargs.get('time_limit', 60.0))
m.optimize()
solution = m.getAttr('X', e_vars)
selected = [(i, j) for i in range(n) for j in range(n) if solution[i, j] > 0.5]
routes = []
for i in range(salesmen):
routes.append([])
next_city = selected[i][0]
finished = False
while not finished:
for j in range(len(selected)):
if selected[j][0] == next_city:
routes[i].append(next_city)
next_city = selected[j][1]
break
if next_city == 0:
routes[i].append(next_city)
finished = True
return routes, m.objVal, m
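def _tlpp_input_sketch():
    # Sketch of the expected input layout (illustrative only; the meaning of
    # 'areas' is inferred from the precedence constraint above, not from any
    # documentation): index 0 is the extraction point, indices 1..salesmen are
    # the robot start positions (depots), and the remaining indices are task
    # points; a 1 in 'areas' appears to force that task and the next one to be
    # visited back-to-back.
    nodes = np.array([[0.0, 4.0],    # 0: extraction point
                      [-1.5, 0.0],   # 1: robot 1 start (depot)
                      [1.5, 0.0],    # 2: robot 2 start (depot)
                      [-0.5, 2.0],   # 3: task
                      [0.5, 2.0]])   # 4: task
    diff = nodes[:, None, :] - nodes[None, :, :]
    cost = np.sqrt((diff ** 2).sum(axis=-1))   # Euclidean cost matrix
    areas = [0, 0, 0, 0, 0]                    # no paired tasks in this sketch
    # routes, objective, model = tlpp_solver(cost, salesmen=2, areas=areas)
    return cost, areas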
def main():
import matplotlib.pyplot as plt
import task_scheduling.utils as tsu
    salesmen = 2
    nodes = np.array([[0, 4], [-1.5, 0], [1.5, 0], [-1.5, 0], [-1.5, 1], [-0.5, 2],
                      [-0.5, 3], [0.5, 3], [0.5, 2], [1.5, 0], [1.5, 1]])
#nodes = np.array([[0, 3], [1, 1], [-1, 1], [1, 2], [1, 3], [-1, 3], [-1, 2]])
cost = tsu.calculate_distances(nodes)
solution, objective, _ = tsu.solve_problem(tlpp_solver, cost, salesmen=salesmen, areas=[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
# solution, objective, _ = tsu.solve_problem(tlpp_solver, cost, salesmen=salesmen, areas=[0, 0, 0, 1, 0, 1, 0])
fig, ax = tsu.plot_problem(nodes, solution, objective)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
srippa/nn_deep | assignment2/cs231n/classifiers/neural_net.py | 2 | 5351 | import numpy as np
import matplotlib.pyplot as plt
def init_two_layer_model(input_size, hidden_size, output_size):
"""
Initialize the weights and biases for a two-layer fully connected neural
network. The net has an input dimension of D, a hidden layer dimension of H,
and performs classification over C classes. Weights are initialized to small
random values and biases are initialized to zero.
Inputs:
- input_size: The dimension D of the input data
- hidden_size: The number of neurons H in the hidden layer
  - output_size: The number of classes C
Returns:
A dictionary mapping parameter names to arrays of parameter values. It has
the following keys:
- W1: First layer weights; has shape (D, H)
- b1: First layer biases; has shape (H,)
- W2: Second layer weights; has shape (H, C)
- b2: Second layer biases; has shape (C,)
"""
# initialize a model
model = {}
model['W1'] = 0.00001 * np.random.randn(input_size, hidden_size)
model['b1'] = np.zeros(hidden_size)
model['W2'] = 0.00001 * np.random.randn(hidden_size, output_size)
model['b2'] = np.zeros(output_size)
return model
def two_layer_net(X, model, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural network.
The net has an input dimension of D, a hidden layer dimension of H, and
performs classification over C classes. We use a softmax loss function and L2
  regularization on the weight matrices. The two layer net should use a ReLU
nonlinearity after the first affine layer.
The two layer net has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- model: Dictionary mapping parameter names to arrays of parameter values.
It should contain the following:
- W1: First layer weights; has shape (D, H)
- b1: First layer biases; has shape (H,)
- W2: Second layer weights; has shape (H, C)
- b2: Second layer biases; has shape (C,)
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
  If y is not passed, return a matrix scores of shape (N, C) where scores[i, c]
  is the score for class c on input X[i].
  If y is passed, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function. This should have the same keys as model.
"""
# unpack variables from the model dictionary
W1,b1,W2,b2 = model['W1'], model['b1'], model['W2'], model['b2']
N, D = X.shape
# compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
pass
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. So that your results match ours, multiply the #
# regularization loss by 0.5 #
#############################################################################
pass
#############################################################################
# END OF YOUR CODE #
#############################################################################
# compute the gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
  dscores = probs.copy()
  dscores[np.arange(N), y] -= 1
  dscores /= N
  grads['W2'] = hidden.T.dot(dscores) + reg * W2
  grads['b2'] = np.sum(dscores, axis=0)
  dhidden = dscores.dot(W2.T)
  dhidden[hidden <= 0] = 0  # backpropagate through the ReLU
  grads['W1'] = X.T.dot(dhidden) + reg * W1
  grads['b1'] = np.sum(dhidden, axis=0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
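# --- Editor's hedged usage sketch (not part of the original assignment) ---
# A minimal smoke test for two_layer_net above: build a tiny model dict by hand
# (same W1/b1/W2/b2 layout as documented), then compare the analytic gradients
# against centred finite differences. All sizes below are illustrative only.
def _demo_two_layer_net_gradient_check(reg=0.1, h=1e-5):
  np.random.seed(0)
  N, D, H, C = 5, 4, 10, 3
  model = {'W1': 1e-2 * np.random.randn(D, H), 'b1': np.zeros(H),
           'W2': 1e-2 * np.random.randn(H, C), 'b2': np.zeros(C)}
  X = np.random.randn(N, D)
  y = np.random.randint(C, size=N)
  loss, grads = two_layer_net(X, model, y, reg=reg)
  for name, param in model.items():
    num_grad = np.zeros_like(param)
    it = np.nditer(param, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
      ix = it.multi_index
      old = param[ix]
      param[ix] = old + h
      loss_plus, _ = two_layer_net(X, model, y, reg=reg)
      param[ix] = old - h
      loss_minus, _ = two_layer_net(X, model, y, reg=reg)
      param[ix] = old
      num_grad[ix] = (loss_plus - loss_minus) / (2 * h)
      it.iternext()
    rel_err = np.max(np.abs(num_grad - grads[name]) /
                     np.maximum(1e-8, np.abs(num_grad) + np.abs(grads[name])))
    print('%s max relative error: %.2e' % (name, rel_err))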
| mit |
ywcui1990/nupic.research | htmresearch/frameworks/capybara/supervised/analysis.py | 6 | 8027 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import os
import time
from htmresearch.frameworks.capybara.distance import \
distance_matrix, sequence_distance, reshaped_sequence_distance
from htmresearch.frameworks.capybara.embedding import \
convert_to_embeddings, reshape_embeddings
from htmresearch.frameworks.capybara.sdr import load_sdrs
from htmresearch.frameworks.capybara.supervised.classification import \
train_and_test
from htmresearch.frameworks.capybara.supervised.plot import \
plot_matrix, plot_projections, make_plot_title, make_subplots
from htmresearch.frameworks.capybara.util import \
get_logger, check_shape, indent, hours_minutes_seconds
from htmresearch.frameworks.dimensionality_reduction.proj import project_vectors
PHASES = ['train', 'test']
CELL_TYPES = ['sp', 'tm']
SP_OUT_WIDTH = 2048
TM_OUT_WIDTH = 65536
LOGGER = get_logger()
def analyze_sdr_sequences(sdr_sequences_train, sdr_sequences_test, data_id,
nb_chunks, n_neighbors, tsne, aggregation, plot_dir,
assume_sequence_alignment):
sdr_widths = {'sp': SP_OUT_WIDTH, 'tm': TM_OUT_WIDTH}
accuracies = {cell_type: {} for cell_type in CELL_TYPES}
dist_mats = {cell_type: {} for cell_type in CELL_TYPES}
embeddings = {cell_type: {} for cell_type in CELL_TYPES}
X = {cell_type: {} for cell_type in CELL_TYPES}
y = {}
# Step 1: convert the SDR sequences to "sequence embeddings" and compute the
# pair-wise sequence distances.
for phase, sdr_sequences in zip(PHASES,
[sdr_sequences_train, sdr_sequences_test]):
# Sort by label to make it easier to visualize embeddings later.
sorted_sdr_sequences = sdr_sequences.sort_values('label')
y[phase] = sorted_sdr_sequences.label.values
# Convert SDRs to embeddings.
(embeddings['sp'][phase],
embeddings['tm'][phase]) = convert_to_embeddings(sorted_sdr_sequences,
aggregation,
nb_chunks)
# Make sure the shapes are ok.
nb_sequences = len(sorted_sdr_sequences)
for cell_type in CELL_TYPES:
check_shape(embeddings[cell_type][phase], (nb_sequences, nb_chunks,
sdr_widths[cell_type]))
check_shape(y[phase], (nb_sequences,))
# Compute distance matrix.
distance = lambda a, b: sequence_distance(a, b, assume_sequence_alignment)
dist_mats['sp'][phase], dist_mats['tm'][phase], _ = distance_matrix(
embeddings['sp'][phase], embeddings['tm'][phase], distance)
# Step 2: Flatten the sequence embeddings to be able to classify each
# sequence with a supervised classifier. The classifier uses the same
# sequence distance as the distance matrix.
for cell_type in CELL_TYPES:
# Flatten embeddings.
# Note: we have to flatten X because sklearn doesn't allow for X to be > 2D.
# Here, the initial shape of X (i.e. sequence embeddings) is 3D and
# therefore has to be flattened to 2D. See the logic of reshape_embeddings()
# for details on how the embeddings are converted from 2D to 3D.
nb_sequences = len(embeddings[cell_type]['train'])
X[cell_type]['train'] = reshape_embeddings(embeddings[cell_type]['train'],
nb_sequences, nb_chunks,
sdr_widths[cell_type])
X[cell_type]['test'] = reshape_embeddings(embeddings[cell_type]['test'],
nb_sequences, nb_chunks,
sdr_widths[cell_type])
sequence_embedding_shape = (nb_chunks, sdr_widths[cell_type])
reshaped_distance = lambda a, b: reshaped_sequence_distance(
a, b, sequence_embedding_shape, assume_sequence_alignment)
# Compute train and test accuracies
(accuracies[cell_type]['train'],
accuracies[cell_type]['test']) = train_and_test(X[cell_type]['train'],
y['train'],
X[cell_type]['test'],
y['test'],
reshaped_distance,
n_neighbors)
# Step 3: plot the distance matrix and 2D projections for each cell
# type (SP or TM) and phase (train or test).
n_plots = 2 # distance matrix + 2d projection
fig, ax, plot_path = make_subplots(len(PHASES), n_plots, plot_dir, data_id,
cell_type, nb_chunks, aggregation)
for phase in PHASES:
phase_idx = PHASES.index(phase)
title = make_plot_title('Pair-wise distances', phase,
accuracies[cell_type][phase])
plot_matrix(dist_mats[cell_type][phase], title, fig, ax[phase_idx][0])
if tsne:
embeddings_proj = project_vectors(X[cell_type][phase],
reshaped_distance)
# Re-use the distance matrix to compute the 2D projections. It's faster.
# embeddings_proj = project_matrix(dist_mats[cell_type][phase])
title = make_plot_title('TSNE 2d projections', phase,
accuracies[cell_type][phase])
plot_projections(embeddings_proj, y[phase], title, fig,
ax[phase_idx][1])
fig.savefig(plot_path)
return accuracies
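# --- Editor's hedged illustration (not part of the original module) ---
# A tiny numpy-only sketch of the Step 2 idea above: sequence embeddings are
# 3D (n_sequences, nb_chunks, sdr_width), sklearn wants 2D, so each embedding
# is flattened and the distance undoes the flattening before comparing chunks.
# The reshape mirrors what reshape_embeddings is assumed to do; all names and
# shapes here are illustrative only.
def _demo_flatten_embeddings_for_sklearn(nb_sequences=4, nb_chunks=3,
                                         sdr_width=8):
  import numpy as np
  rng = np.random.RandomState(0)
  embeddings_3d = rng.rand(nb_sequences, nb_chunks, sdr_width)
  # Flatten each (nb_chunks, sdr_width) sequence embedding into one row.
  X_2d = embeddings_3d.reshape(nb_sequences, nb_chunks * sdr_width)
  def chunk_distance(a, b):
    # Restore the per-chunk structure, then average chunk-wise distances
    # (a stand-in for reshaped_sequence_distance).
    a3 = a.reshape(nb_chunks, sdr_width)
    b3 = b.reshape(nb_chunks, sdr_width)
    return float(np.mean(np.linalg.norm(a3 - b3, axis=1)))
  return chunk_distance(X_2d[0], X_2d[1])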
def run_analysis(trace_dir, data_ids, chunks, n_neighbors, tsne, aggregations,
plot_dir, assume_sequence_alignment):
if not os.path.exists(plot_dir): os.makedirs(plot_dir)
tic = time.time()
LOGGER.info('Analysis tree')
for data_id in data_ids:
LOGGER.info(indent(1) + 'load: ' + data_id)
sdr_sequences = {}
for phase in PHASES:
f_path = os.path.join(trace_dir, 'trace_%s_%s' % (data_id, phase.upper()))
sdr_sequences[phase] = load_sdrs(f_path, SP_OUT_WIDTH, TM_OUT_WIDTH)
LOGGER.info(indent(2) + 'loaded: ' + f_path)
LOGGER.info(indent(1) + 'analyze: ' + data_id)
for aggregation in aggregations:
LOGGER.info(indent(2) + 'aggregation: ' + aggregation)
for nb_chunks in chunks:
LOGGER.info(indent(3) + 'nb_chunks: ' + str(nb_chunks))
accuracies = analyze_sdr_sequences(
sdr_sequences['train'].copy(), sdr_sequences['test'].copy(), data_id,
nb_chunks, n_neighbors, tsne, aggregation, plot_dir,
assume_sequence_alignment)
for cell_type, train_test_acc in accuracies.items():
for phase, acc in train_test_acc.items():
LOGGER.info(indent(4) + '%s %s accuracy: %s /100'
% (cell_type.upper(), phase, acc))
toc = time.time()
td = datetime.timedelta(seconds=(toc - tic))
LOGGER.info('Elapsed time: %dh %02dm %02ds' % hours_minutes_seconds(td))
| agpl-3.0 |
perimosocordiae/scipy | scipy/signal/wavelets.py | 16 | 14046 | import numpy as np
from scipy.linalg import eig
from scipy.special import comb
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The coefficients of the low-pass FIR filter, of length ``2*p``.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
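# --- Editor's hedged usage sketch (not part of the SciPy source) ---
# Quick sanity checks for daub() and qmf() above: a Daubechies low-pass filter
# has 2*p coefficients that sum to sqrt(2) under this normalization, and the
# matching high-pass (QMF) filter sums to ~0 because of the zero at f=1/2.
def _demo_daub_qmf(p=4):
    h = daub(p)
    g = qmf(h)
    assert len(h) == 2 * p
    np.testing.assert_allclose(np.sum(h), np.sqrt(2))
    np.testing.assert_allclose(np.sum(g), 0.0, atol=1e-10)
    return h, g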
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.empty((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
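# --- Editor's hedged usage sketch (not part of the SciPy source) ---
# Example of cascade() above: build the Daubechies-4 scaling function phi and
# wavelet psi on the dyadic grid and check that phi integrates to roughly 1.
def _demo_cascade_daub(p=4, J=7):
    x, phi, psi = cascade(daub(p), J=J)
    dx = x[1] - x[0]
    print("approximate integral of phi:", np.sum(phi) * dx)
    return x, phi, psi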
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
    Note that the energy of the returned wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
"""
x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
output = np.exp(1j * w * x)
if complete:
output -= np.exp(-0.5 * (w**2))
output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
return output
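# --- Editor's hedged worked example (not part of the SciPy source) ---
# The morlet() docstring above gives the fundamental frequency in Hz as
# f = 2*s*w*r / M. For example, with M=1000 points, s=1.0, w=5.0 and a
# sampling rate r=1000 Hz, f = 2*1.0*5.0*1000/1000 = 10 Hz.
def _morlet_fundamental_frequency(M, w=5.0, s=1.0, r=1.0):
    return 2.0 * s * w * r / M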
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def morlet2(M, s, w=5):
"""
Complex Morlet wavelet, designed to work with `cwt`.
Returns the complete version of morlet wavelet, normalised
according to `s`::
exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
Parameters
----------
M : int
Length of the wavelet.
s : float
Width parameter of the wavelet.
w : float, optional
Omega0. Default is 5
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet : Implementation of Morlet wavelet, incompatible with `cwt`
Notes
-----
.. versionadded:: 1.4.0
This function was designed to work with `cwt`. Because `morlet2`
returns an array of complex numbers, the `dtype` argument of `cwt`
should be set to `complex128` for best results.
Note the difference in implementation with `morlet`.
The fundamental frequency of this wavelet in Hz is given by::
f = w*fs / (2*s*np.pi)
where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
Similarly we can get the wavelet width parameter at ``f``::
s = w*fs / (2*f*np.pi)
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> M = 100
>>> s = 4.0
>>> w = 2.0
>>> wavelet = signal.morlet2(M, s, w)
>>> plt.plot(abs(wavelet))
>>> plt.show()
This example shows basic use of `morlet2` with `cwt` in time-frequency
analysis:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t, dt = np.linspace(0, 1, 200, retstep=True)
>>> fs = 1/dt
>>> w = 6.
>>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
>>> freq = np.linspace(1, fs/2, 100)
>>> widths = w*fs / (2*freq*np.pi)
>>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
>>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud')
>>> plt.show()
"""
x = np.arange(0, M) - (M - 1.0) / 2
x = x / s
wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
output = np.sqrt(1/s) * wavelet
return output
def cwt(data, wavelet, widths, dtype=None, **kwargs):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter. The `wavelet` function
is allowed to be complex.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
dtype : data-type, optional
The desired data type of output. Defaults to ``float64`` if the
output of `wavelet` is real and ``complex128`` if it is complex.
.. versionadded:: 1.4.0
kwargs
Keyword arguments passed to wavelet function.
.. versionadded:: 1.4.0
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
.. versionadded:: 1.4.0
For non-symmetric, complex-valued wavelets, the input signal is convolved
with the time-reversed complex-conjugate of the wavelet data [1].
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
**kwargs))[::-1], mode='same')
References
----------
.. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
Academic Press, 2009.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
if wavelet == ricker:
window_size = kwargs.pop('window_size', None)
# Determine output type
if dtype is None:
if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
dtype = np.complex128
else:
dtype = np.float64
output = np.empty((len(widths), len(data)), dtype=dtype)
for ind, width in enumerate(widths):
N = np.min([10 * width, len(data)])
# the conditional block below and the window_size
# kwarg pop above may be removed eventually; these
# are shims for 32-bit arch + NumPy <= 1.14.5 to
# address gh-11095
if wavelet == ricker and window_size is None:
ceil = np.ceil(N)
if ceil != N:
N = int(N)
wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
output[ind] = convolve(data, wavelet_data, mode='same')
return output
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/kernel_approximation.py | 41 | 17976 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
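# --- Editor's hedged usage sketch (not part of the scikit-learn source) ---
# Illustration of the Monte Carlo approximation implemented by RBFSampler:
# inner products of the transformed features approximate the exact RBF kernel,
# and the error shrinks roughly like 1/sqrt(n_components). The data, gamma and
# n_components below are illustrative only.
def _demo_rbf_sampler(n_components=2000, gamma=0.5, random_state=0):
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(random_state)
    X = rng.rand(20, 5)
    sampler = RBFSampler(gamma=gamma, n_components=n_components,
                         random_state=random_state)
    features = sampler.fit_transform(X)
    kernel_exact = rbf_kernel(X, gamma=gamma)
    kernel_approx = np.dot(features, features.T)
    return np.mean(np.abs(kernel_exact - kernel_approx))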
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling the fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
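# --- Editor's hedged usage sketch (not part of the scikit-learn source) ---
# Shape check for AdditiveChi2Sampler: each of the n_features input columns is
# expanded into 2*sample_steps - 1 output columns (the zeroth term plus one
# cosine/sine pair per extra sampling step). The data below is illustrative.
def _demo_additive_chi2_shape(sample_steps=2):
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)  # entries must be non-negative for this kernel
    transformer = AdditiveChi2Sampler(sample_steps=sample_steps)
    X_new = transformer.fit_transform(X)
    assert X_new.shape == (10, 4 * (2 * sample_steps - 1))
    return X_new.shape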
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
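# --- Editor's hedged usage sketch (not part of the scikit-learn source) ---
# Typical use of Nystroem: fit on a subset of the data, map all samples into
# the approximate feature space, and compare the implied Gram matrix with the
# exact kernel. Data and parameters below are illustrative only.
def _demo_nystroem(n_components=50, gamma=0.2, random_state=0):
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(random_state)
    X = rng.rand(200, 10)
    nystroem = Nystroem(kernel='rbf', gamma=gamma, n_components=n_components,
                        random_state=random_state)
    features = nystroem.fit_transform(X)
    kernel_exact = rbf_kernel(X, gamma=gamma)
    kernel_approx = np.dot(features, features.T)
    return np.mean(np.abs(kernel_exact - kernel_approx))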
| mit |
hainm/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
myth/trashcan | tdt4300/exercise4/dbscan.py | 1 | 1451 | # -*- coding: utf-8 -*-
import numpy as np
from sys import argv
EPS = float(argv[2])
MINPTS = 3
POINTS = np.array([
[2.0,4.0],
[4.0,17.0],
[5.0,14.0],
[5.0,7.0],
[5.0,4.0],
[6.0,19.0],
[7.0,17.0],
[7.0,4.0],
[8.0,18.0],
[9.0,15.0],
[9.0,4.0],
[12.0,12.0],
[12.0,9.0],
[14.0,13.0],
[14.0,11.0],
[15.0,8.0],
[16.0,13.0],
[17.0,11.0]
])
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.datasets.samples_generator import make_blobs
db = DBSCAN(eps=EPS, min_samples=MINPTS, metric='euclidean').fit(POINTS)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
uniq_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(uniq_labels)))
for k, col in zip(uniq_labels, colors):
if k == -1:
col = 'k'
class_member_mask = (labels == k)
xy = POINTS[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = POINTS[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title(argv[1])
plt.savefig("clustering-%s.png" % argv[1].replace('.', ''))
| gpl-2.0 |
zfrenchee/pandas | pandas/tests/frame/test_axis_select_reindex.py | 2 | 43348 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isna)
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas.errors import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
assert obj.index.name == 'first'
assert obj.columns.name == 'second'
assert list(df.columns) == ['d', 'e', 'f']
pytest.raises(ValueError, df.drop, ['g'])
pytest.raises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
# GH 16398
dropped = df.drop([], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.loc[[1, 2], :])
pytest.raises(ValueError, simple.drop, 5)
pytest.raises(ValueError, simple.drop, 'C', 1)
pytest.raises(ValueError, simple.drop, [1, 5])
pytest.raises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.loc[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's (GH12392)
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.drop('a')
res2 = df.drop(index='a')
tm.assert_frame_equal(res1, res2)
res1 = df.drop('d', 1)
res2 = df.drop(columns='d')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(labels='e', axis=1)
res2 = df.drop(columns='e')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0)
res2 = df.drop(index=['a'])
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0).drop(['d'], axis=1)
res2 = df.drop(index=['a'], columns=['d'])
tm.assert_frame_equal(res1, res2)
with pytest.raises(ValueError):
df.drop(labels='a', index='b')
with pytest.raises(ValueError):
df.drop(labels='a', columns='b')
with pytest.raises(ValueError):
df.drop(axis=1)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in compat.iteritems(newFrame):
assert tm.equalContents(series.index, newFrame.index)
emptyFrame = self.frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in compat.iteritems(nonContigFrame):
assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
assert newFrame.index is self.frame.index
# length zero
newFrame = self.frame.reindex([])
assert newFrame.empty
assert len(newFrame.columns) == len(self.frame.columns)
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
assert len(newFrame.index) == len(self.frame.index)
assert len(newFrame.columns) == len(self.frame.columns)
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
tm.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
assert result is not self.frame
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
assert df.index.name == 'iname'
df = df.reindex(Index(np.arange(10), name='tmpname'))
assert df.index.name == 'tmpname'
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
assert df.columns.name == 'iname'
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
assert smaller['A'].dtype == np.int64
bigger = smaller.reindex(self.intframe.index)
assert bigger['A'].dtype == np.float64
smaller = self.intframe.reindex(columns=['A', 'B'])
assert smaller['A'].dtype == np.int64
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
new_frame = self.frame.reindex(columns=['A', 'B', 'E'])
tm.assert_series_equal(new_frame['B'], self.frame['B'])
assert np.isnan(new_frame['E']).all()
assert 'C' not in new_frame
# Length zero
new_frame = self.frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
# GH 14992, reindexing over columns ignored method
df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float)
# default method
result = df.reindex(columns=range(6))
expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan],
[np.nan, 21, 22, np.nan, 23, np.nan],
[np.nan, 31, 32, np.nan, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='ffill'
result = df.reindex(columns=range(6), method='ffill')
expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13],
[np.nan, 21, 22, 22, 23, 23],
[np.nan, 31, 32, 32, 33, 33]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='bfill'
result = df.reindex(columns=range(6), method='bfill')
expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan],
[21, 21, 22, 23, 23, np.nan],
[31, 31, 32, 33, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
pytest.raises(ValueError, df.reindex, index=list(range(len(df))))
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]},
index=[0, 1, 3])
result = df.reindex([0, 1, 3])
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis='index')
assert_frame_equal(result, expected)
def test_reindex_positional_warns(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5],
"C": [np.nan, np.nan]})
with tm.assert_produces_warning(FutureWarning):
result = df.reindex([0, 1], ['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]})
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis=1)
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis='index')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(columns=[0, 1], axis='columns')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], columns=[0, 1], axis='columns')
with tm.assert_raises_regex(TypeError, 'Cannot specify all'):
df.reindex([0, 1], [0], ['A'])
# Mixing styles
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
# Duplicates
with tm.assert_raises_regex(TypeError, "multiple values"):
df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=['A'])
expected = pd.DataFrame({"A": [1, 2]})
assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
# https://github.com/pandas-dev/pandas/issues/12392
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
with tm.assert_produces_warning(FutureWarning) as m:
res1 = df.reindex(['b', 'a'], ['e', 'd'])
assert 'reindex' in str(m[0].message)
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_align(self):
af, bf = self.frame.align(self.frame)
assert af._data is not self.frame._data
af, bf = self.frame.align(self.frame, copy=False)
assert af._data is self.frame._data
# axis = 0
other = self.frame.iloc[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='right', axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.iloc[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, self.frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='inner', axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=None)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
with pytest.raises(ValueError):
self.frame.align(af.iloc[0, :3], join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
assert isinstance(right, Series)
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
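        # Align `a` and `b` with the given join/fill options, then rebuild the
        # expected frames by hand (reindex each to the joined labels, then
        # fillna) and compare both aligned outputs against that expectation.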
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.iloc[0:4, :10]
right = self.frame.iloc[2:, 6:]
empty = self.frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# Items
filtered = self.frame.filter(['A', 'B', 'E'])
assert len(filtered.columns) == 2
assert 'E' not in filtered
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
assert len(filtered.columns) == 2
assert 'E' not in filtered
# Other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
tm.assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
assert len(filtered.columns) == 2
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
# pass in None
with tm.assert_raises_regex(TypeError, 'Must pass'):
self.frame.filter()
with tm.assert_raises_regex(TypeError, 'Must pass'):
self.frame.filter(items=None)
with tm.assert_raises_regex(TypeError, 'Must pass'):
self.frame.filter(axis=1)
# test mutually exclusive arguments
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$')
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi')
# objects
filtered = self.mixed_frame.filter(like='foo')
assert 'foo' in filtered
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
assert 'C' in filtered
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
@pytest.mark.parametrize('name,expected', [
('a', DataFrame({u'a': [1, 2]})),
(u'a', DataFrame({u'a': [1, 2]})),
(u'あ', DataFrame({u'あ': [3, 4]}))
])
def test_filter_unicode(self, name, expected):
# GH13101
df = DataFrame({u'a': [1, 2], u'あ': [3, 4]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
@pytest.mark.parametrize('name', ['a', u'a'])
def test_filter_bytestring(self, name):
# GH13101
df = DataFrame({b'a': [1, 2], b'b': [3, 4]})
expected = DataFrame({b'a': [1, 2]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
# deprecated: gh-12410
f = lambda x: x.weekday() == 2
index = self.tsframe.index[[f(x) for x in self.tsframe.index]]
expected_weekdays = self.tsframe.reindex(index=index)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.tsframe.select(f, axis=0)
assert_frame_equal(result, expected_weekdays)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# replacement
f = lambda x: x.weekday == 2
result = self.tsframe.loc(axis=0)[f(self.tsframe.index)]
assert_frame_equal(result, expected_weekdays)
crit = lambda x: x in ['B', 'D']
result = self.frame.loc(axis=1)[(self.frame.columns.map(crit))]
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# doc example
df = DataFrame({'A': [1, 2, 3]}, index=['foo', 'bar', 'baz'])
crit = lambda x: x in ['bar', 'baz']
with tm.assert_produces_warning(FutureWarning):
expected = df.select(crit)
result = df.loc[df.index.map(crit)]
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# negative indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=True, axis=0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=False, axis=0)
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
pytest.raises(IndexError, df.take, [3, 1, 2, 30], axis=0)
pytest.raises(IndexError, df.take, [3, 1, 2, -31], axis=0)
pytest.raises(IndexError, df.take, [3, 1, 2, 5], axis=1)
pytest.raises(IndexError, df.take, [3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# negative indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=lrange(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
pytest.raises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
with tm.assert_produces_warning(FutureWarning) as m:
newFrame = self.frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(index=lrange(4), columns=lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=lrange(4), columns=lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=lrange(2), columns=lrange(2))
expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
| bsd-3-clause |
blancha/abcngspipelines | alignment/bwamem.py | 1 | 2998 | #!/usr/bin/env python3
# Version 1.1
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
import argparse
import glob
import os
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates bwa mem scripts.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory.", default="bwa")
parser.add_argument("-i", "--inputDirectory", help="Input directory with FASTQ files.", default="../data/FASTQ_files/untrimmed/")
parser.add_argument("-o", "--outputDirectory", help="Output directory with bwa results.", default="../results/bwa/")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
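# Example invocation (paths are the argparse defaults above, shown for illustration only):
#   bwamem.py -s bwa -i ../data/FASTQ_files/untrimmed/ -o ../results/bwa/ -q no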
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
util.cdMainScriptsDirectory()
# Process the command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
genome = config.get("project", "genome")
genomeFile = config.get(genome, "genomeFile")
bwaIndex = config.get(genome, "bwaIndex")
threads = config.get("bwamem", "threads")
# Read samples file
samplesFile = util.readsamplesFile()
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Create output directories, if they do not exist yet.
if not os.path.exists(outputDirectory):
os.makedirs(outputDirectory)
# Cycle through all the samples and write the bwa scripts.
for index, row in samplesFile.iterrows():
sample = row["sample"]
if "lane" in samplesFile.columns:
        sample = sample + "_lane_" + str(row["lane"])
# Create output directories
if not os.path.exists(outputDirectory + "/" + sample):
os.mkdir(outputDirectory +"/" + sample)
file_R1 = row["file_r1"]
file_R2 = row["file_r2"]
# Create script file.
scriptName = 'bwa_' + sample + '.sh'
script = open(scriptName, 'w')
if header:
util.writeHeader(script, config, "bwamem")
script.write("bwa mem -M" + " \\\n")
script.write("-t " + threads + " \\\n")
script.write("-R '@RG\\tID:" + sample + "\\tSM:" + row["sample"] + "\\tPL:Illumina\\tLB:lib1\\tPU:unit1'" + " \\\n");
script.write(bwaIndex + " \\\n")
script.write(inputDirectory + "/" + file_R1 + " \\\n")
script.write(inputDirectory + "/" + file_R2 + " \\\n")
script.write("1> " + outputDirectory + "/" + sample + "/" + sample + ".sam " + "\\\n")
script.write("2> " + scriptName + ".log")
script.close()
if args.submitJobsToQueue.lower() in ("yes", "y"):
subprocess.call("submitJobs.py", shell=True)
| gpl-3.0 |
Obus/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
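    # Piecewise loss in z = y_pred * y_true: linear (-4z) for z < -1,
    # quadratic (1 - z)^2 for -1 <= z < 1, and zero for z >= 1.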
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
lenovor/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
image of size 512 x 512 pixels, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
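# Baseline for comparison: uniform (equal-width) quantization of the grey
# levels into n_clusters bins, versus the k-means codebook above.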
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
cpcloud/pepdata | pepdata/reference.py | 1 | 5766 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
from gzip import GzipFile
import re
import Bio.SeqIO
import pandas as pd
from progressbar import ProgressBar
from common import (
int_or_seq, dataframe_from_counts, bad_amino_acids,
fetch_file, fetch_and_transform
)
BASE_URL = "ftp://ftp.ensembl.org"
FTP_DIR = "/pub/release-75/fasta/homo_sapiens/pep/"
FASTA_FILENAME = "Homo_sapiens.GRCh37.75.pep.all.fa"
GZ_FILENAME = FASTA_FILENAME + ".gz"
FULL_URL = BASE_URL + FTP_DIR + GZ_FILENAME
def load_dataframe():
"""
Loads the protein products of the reference genome
in a dataframe with columns:
- protein : amino acid string
- protein_id
- gene_id
- transcript_id
"""
filename = fetch_file(FASTA_FILENAME, FULL_URL)
sequences = []
protein_ids = []
gene_ids = []
transcript_ids = []
gene_pattern = re.compile('gene:(ENSG[0-9]*)')
transcript_pattern = re.compile('transcript:(ENST[0-9]*)')
with open(filename, 'r') as f:
for record in Bio.SeqIO.parse(f, 'fasta'):
protein_ids.append(record.id)
sequences.append(str(record.seq))
desc = record.description
gene_matches = gene_pattern.findall(desc)
if gene_matches:
gene_id = gene_matches[0]
else:
gene_id = None
gene_ids.append(gene_id)
transcript_matches = transcript_pattern.findall(desc)
if transcript_matches:
transcript_id = transcript_matches[0]
else:
transcript_id = None
transcript_ids.append(transcript_id)
df = pd.DataFrame({
'protein' : sequences,
'gene_id' : gene_ids,
'protein_id' : protein_ids,
'transcript_id': transcript_ids})
return df
#if filter_amino_acids:
# return df.ix[df.protein.str.contains(bad_amino_acids)]
#else:
# return df
def _generate_counts(src_filename, peptide_lengths, nrows):
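    # Slide a window of each requested length over every FASTA record and
    # count how often each substring (candidate epitope) occurs.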
epitope_counts = {}
get_count = epitope_counts.get
with open(src_filename, 'r') as f:
seqs = [str(record.seq) for record in Bio.SeqIO.parse(f, "fasta")]
print "Generating substrings of length %s" % (peptide_lengths,)
pbar = ProgressBar(maxval = len(seqs)).start()
for seq_num, seq in enumerate(seqs):
seq_len = len(seq)
if nrows and seq_num > nrows:
break
for size in peptide_lengths:
for i in xrange(seq_len - size + 1):
epitope = seq[i:i+size]
if epitope in epitope_counts:
epitope_counts[epitope] += 1
else:
epitope_counts[epitope] = 1
pbar.update(seq_num+1)
pbar.finish()
return dataframe_from_counts(epitope_counts)
def _generate_set(src_filename, peptide_lengths, nrows):
peptides = set([])
with open(src_filename, 'r') as f:
seqs = [str(record.seq) for record in Bio.SeqIO.parse(f, "fasta")]
print "Generating substrings of length %s" % (peptide_lengths,)
pbar = ProgressBar(maxval = len(seqs)).start()
for seq_num, seq in enumerate(seqs):
if nrows and seq_num > nrows:
break
for size in peptide_lengths:
for i in xrange(len(seq) - size + 1):
peptides.add(seq[i:i+size])
pbar.update(seq_num+1)
pbar.finish()
return peptides
def load_peptide_counts(peptide_length = [8,9,10,11], nrows = None):
"""
    Return counts of all reference peptides (of the given lengths) encoded in the reference human exome.
"""
peptide_lengths = int_or_seq(peptide_length)
lens = "_".join(str(n) for n in peptide_lengths)
cache_filename = \
"reference_peptide_counts_" + lens + "_nrows_" + str(nrows) + ".csv"
def save_counts(src_path, dst_path):
counts = _generate_counts(src_path, peptide_lengths, nrows)
print "Saving %s" % dst_path
counts.to_csv(dst_path)
return counts
return fetch_and_transform(
transformed_filename = cache_filename,
transformer = save_counts,
loader = pd.read_csv,
source_filename = FASTA_FILENAME,
source_url = FULL_URL)
def load_peptide_set(peptide_length = [8,9,10,11], nrows = None):
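    """
    Set of all reference peptide strings of the given lengths, cached to a
    gzipped pickle on first use.
    """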
peptide_lengths = int_or_seq(peptide_length)
lens = "_".join(str(n) for n in peptide_lengths)
cache_filename = \
"reference_peptide_set_" + lens + "_nrows_" + str(nrows) + ".pickle.gz"
def save_set(src_path, dst_path):
string_set = _generate_set(src_path, peptide_lengths, nrows)
with GzipFile(dst_path, 'w') as out_file:
out_file.write(cPickle.dumps(string_set))
return string_set
def load_set(path):
result = None
with GzipFile(path, 'r') as in_file:
result = cPickle.loads(in_file.read())
return result
return fetch_and_transform(
transformed_filename = cache_filename,
transformer = save_set,
loader = load_set,
source_filename = FASTA_FILENAME,
source_url = FULL_URL)
| apache-2.0 |
perrygeo/geopandas | examples/nyc_boros.py | 8 | 1394 | """
Generate example images for GeoPandas documentation.
TODO: autogenerate these from docs themselves
Kelsey Jordahl
Time-stamp: <Tue May 6 12:17:29 EDT 2014>
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
np.random.seed(1)
DPI = 100
# http://www.nyc.gov/html/dcp/download/bytes/nybb_14aav.zip
boros = GeoDataFrame.from_file('nybb.shp')
boros.set_index('BoroCode', inplace=True)
boros.sort()
boros.plot()
plt.xticks(rotation=90)
plt.savefig('nyc.png', dpi=DPI, bbox_inches='tight')
#plt.show()
boros.geometry.convex_hull.plot()
plt.xticks(rotation=90)
plt.savefig('nyc_hull.png', dpi=DPI, bbox_inches='tight')
#plt.show()
N = 2000 # number of random points
R = 2000 # radius of buffer in feet
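# Scatter N random points over the current plot extent, buffer each by R feet,
# and merge the buffers into a single (multi)polygon used to punch holes in
# the borough geometries below.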
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
#xmin, xmax, ymin, ymax = 900000, 1080000, 120000, 280000
xc = (xmax - xmin) * np.random.random(N) + xmin
yc = (ymax - ymin) * np.random.random(N) + ymin
pts = GeoSeries([Point(x, y) for x, y in zip(xc, yc)])
mp = pts.buffer(R).unary_union
boros_with_holes = boros.geometry - mp
boros_with_holes.plot()
plt.xticks(rotation=90)
plt.savefig('boros_with_holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
holes = boros.geometry & mp
holes.plot()
plt.xticks(rotation=90)
plt.savefig('holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
| bsd-3-clause |
rvraghav93/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear model (logistic regression) used
# downstream, to avoid overfitting, in particular if the total number of
# leaves is similar to the number of training samples.
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
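# RandomTreesEmbedding already one-hot encodes leaf membership in transform(),
# so its output can feed the logistic regression directly, without a separate
# OneHotEncoder step.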
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
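# For the supervised ensembles, apply() returns raw leaf indices, so an explicit
# OneHotEncoder is fitted on those indices before the logistic regression.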
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/stats/_binned_statistic.py | 1 | 25264 | from __future__ import division, print_function, absolute_import
import warnings
from collections import namedtuple
import numpy as np
from scipy._lib.six import callable, xrange
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
          * 'sum' : compute the sum of values for points within each bin.
            This is identical to a weighted histogram.
          * 'std' : compute the standard deviation of values for points
            within each bin (calculated with ddof=0, i.e. the population
            standard deviation).
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if (statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = {}
for i in xrange(Ndim):
sampBin[i] = np.digitize(sample[:, i], edges[i])
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
# `binnumbers` is which bin (in linearized `Ndim` space) each sample goes
binnumbers = np.zeros(Dlen, int)
for i in xrange(0, Ndim - 1):
binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()
binnumbers += sampBin[ni[-1]]
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif callable(statistic):
with warnings.catch_warnings():
            # NumPy generates warnings for mean/std/... with an empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
            except Exception:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, np.sort(nbin)))
for i in xrange(nbin.size):
j = ni.argsort()[i]
        # Accommodate the extra `Vdim` dimension-zero with `+1`
result = result.swapaxes(i + 1, j + 1)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if (expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin - 2))
return BinnedStatisticddResult(result, edges, binnumbers)
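# --- Illustrative usage sketch (editor's addition, not part of the upstream file) ---
# A minimal example of the N-dimensional binned statistic, assuming the function is
# exposed publicly as ``scipy.stats.binned_statistic_dd`` with the signature
# documented above. Guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import binned_statistic_dd
    rng = np.random.RandomState(0)
    sample = rng.uniform(0.0, 1.0, size=(100, 2))   # 100 points in 2-D
    values = sample[:, 0] + sample[:, 1]            # the statistic is computed on these
    # Mean of `values` over a 4x4 grid covering the unit square.
    stat, edges, binnumber = binned_statistic_dd(
        sample, values, statistic='mean', bins=[4, 4],
        range=[(0, 1), (0, 1)], expand_binnumbers=True)
    print(stat.shape)        # (4, 4)
    print(len(edges))        # 2 -- one edge array per dimension
    print(binnumber.shape)   # (2, 100) because expand_binnumbers=True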
| mit |
farhaanbukhsh/sympy | sympy/interactive/tests/test_ipythonprinting.py | 21 | 6055 | """Tests that the IPython printing module is properly loaded. """
from sympy.core.compatibility import u
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] == "pi"
assert app.user_ns['a2']['text/plain'] == "pi**2"
else:
assert app.user_ns['a'][0]['text/plain'] == "pi"
assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
# Load printing extension
app.run_cell("from sympy import init_printing")
app.run_cell("init_printing()")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
else:
assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi')
assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ')
def test_print_builtin_option():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
app.run_cell("from sympy import init_printing")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : In Python 3 the text is unicode, but in 2 it is a string.
# XXX: How can we make this ignore the terminal width? This test fails if
# the terminal is too narrow.
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
    # If we enable the default printing, then the dictionary should render
# as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
latex = app.user_ns['a']['text/latex']
else:
text = app.user_ns['a'][0]['text/plain']
latex = app.user_ns['a'][0]['text/latex']
assert text in ("{pi: 3.14, n_i: 3}",
u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'),
"{n_i: 3, pi: 3.14}",
u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}'))
assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True, print_builtin=False)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
# Note : In Python 3 the text is unicode, but in 2 it is a string.
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
# Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
# Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("import IPython")
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import init_printing, Matrix")
app.run_cell("init_printing(use_latex='matplotlib')")
# The png formatter is not enabled by default in this context
app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
# Make sure no warnings are raised by IPython
app.run_cell("import warnings")
app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
# This should not raise an exception
app.run_cell("a = format(Matrix([1, 2, 3]))")
# issue 9799
app.run_cell("from sympy import Piecewise, Symbol, Eq")
app.run_cell("x = Symbol('x'); pw = format(Piecewise((1, Eq(x, 0)), (0, True)))")
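# --- Illustrative usage sketch (editor's addition, not part of the upstream test file) ---
# The same pattern the tests above use: drive an in-process IPython session and
# inspect what the display formatter returns for a SymPy expression. Assumes
# IPython >= 1.0 is installed (otherwise ``format`` returns a bare dict rather
# than a (data, metadata) pair).
if __name__ == "__main__":
    app = init_ipython_session()
    app.run_cell("ip = get_ipython()")
    app.run_cell("inst = ip.instance()")
    app.run_cell("format = inst.display_formatter.format")
    app.run_cell("from sympy import Symbol, init_printing")
    app.run_cell("init_printing(use_latex=True)")
    app.run_cell("out = format(Symbol('x')**2)")
    # 'text/plain' is always present; 'text/latex' appears once that formatter
    # is enabled, as done in test_print_builtin_option above.
    print(app.user_ns['out'][0].keys())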
| bsd-3-clause |
rgommers/scipy | scipy/interpolate/interpolate.py | 10 | 99598 | __all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
ravel, poly1d, asarray, intp)
import scipy.special as spec
from scipy.special import comb
from scipy._lib._util import prod
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def lagrange(x, w):
r"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e., f(`x`).
Returns
-------
lagrange : `numpy.poly1d` instance
The Lagrange interpolating polynomial.
Examples
--------
Interpolate :math:`f(x) = x^3` by 3 points.
>>> from scipy.interpolate import lagrange
>>> x = np.array([0, 1, 2])
>>> y = x**3
>>> poly = lagrange(x, y)
    Since there are only 3 points, the Lagrange polynomial has degree 2. Explicitly,
it is given by
.. math::
\begin{aligned}
L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
&= x (-2 + 3x)
\end{aligned}
>>> from numpy.polynomial.polynomial import Polynomial
>>> Polynomial(poly).coef
array([ 3., -2., 0.])
"""
M = len(x)
p = poly1d(0.0)
for j in range(M):
pt = poly1d(w[j])
for k in range(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
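# --- Illustrative usage sketch (editor's addition, not part of the upstream file) ---
# Evaluating the numpy.poly1d returned by `lagrange` above at new points; with
# four nodes the cubic y = x**3 is recovered exactly (up to rounding).
if __name__ == "__main__":
    import numpy as np
    xs = np.array([0.0, 1.0, 2.0, 3.0])
    ws = xs ** 3
    demo_poly = lagrange(xs, ws)      # numpy.poly1d of degree len(xs) - 1 = 3
    print(demo_poly(xs))              # ~[ 0.  1.  8. 27.], matches ws at the nodes
    print(demo_poly(1.5))             # ~3.375, since the cubic is reproduced exactly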
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d:
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)`` which returns a scalar value `z`. This class returns a
function whose call method uses spline interpolation to find the value
of new points.
If `x` and `y` represent a regular grid, consider using
`RectBivariateSpline`.
If `z` is a vector value, consider using `interpn`.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multidimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multidimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated via nearest-neighbor extrapolation.
See Also
--------
RectBivariateSpline :
Much faster 2-D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : 1-D version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non-rectangular grid")
if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non-rectangular grid")
interpolation_types = {'linear': 1, 'cubic': 3, 'quintic': 5}
try:
kx = ky = interpolation_types[kind]
except KeyError as e:
raise ValueError(
f"Unsupported interpolation type {repr(kind)}, must be "
f"either of {', '.join(map(repr, interpolation_types))}."
) from e
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1-D array
x-coordinates of the mesh on which to interpolate.
y : 1-D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2-D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x, kind="mergesort")
y = np.sort(y, kind="mergesort")
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
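# --- Illustrative usage sketch (editor's addition, not part of the upstream file) ---
# Evaluating an interp2d instance (defined above) on a rectangular grid, including
# a first partial derivative in x via the `dx` argument of __call__.
if __name__ == "__main__":
    import numpy as np
    x_demo = np.linspace(0, 2 * np.pi, 20)
    y_demo = np.linspace(0, 2 * np.pi, 25)
    xx_demo, yy_demo = np.meshgrid(x_demo, y_demo)
    z_demo = np.sin(xx_demo) * np.cos(yy_demo)      # shape (len(y_demo), len(x_demo))
    f_demo = interp2d(x_demo, y_demo, z_demo, kind='cubic')
    xnew = np.linspace(0.5, 5.0, 7)
    ynew = np.linspace(0.5, 5.0, 5)
    print(f_demo(xnew, ynew).shape)                 # (5, 7): (len(ynew), len(xnew))
    print(f_demo(xnew, ynew, dx=1).shape)           # same shape, d/dx of the spline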
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, str) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string or as an integer
specifying the order of the spline interpolator to use.
The string has to be one of 'linear', 'nearest', 'nearest-up', 'zero',
'slinear', 'quadratic', 'cubic', 'previous', or 'next'. 'zero',
'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
zeroth, first, second or third order; 'previous' and 'next' simply
return the previous or next value of the point; 'nearest-up' and
'nearest' differ when interpolating half-integers (e.g. 0.5, 1.5)
in that 'nearest-up' rounds up and 'nearest' rounds down. Default
is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless ``fill_value="extrapolate"``.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Attributes
----------
fill_value
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Notes
-----
Calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Input values `x` and `y` must be convertible to `float` values like
`int` or `float`.
If the values in `x` are not unique, the resulting behavior is
undefined and specific to the choice of `kind`, i.e., changing
`kind` will change the behavior for duplicates.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1-D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest', 'nearest-up', 'previous',
'next'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x, kind="mergesort")
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: https://docs.python.org/reference/datamodel.html
if kind in ('linear', 'nearest', 'nearest-up', 'previous', 'next'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self._side = 'left'
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'nearest-up':
# Do division before addition to prevent possible integer
# overflow
self._side = 'right'
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'previous':
# Side for np.searchsorted and index for clipping
self._side = 'left'
self._ind = 0
# Move x by one floating point value to the left
self._x_shift = np.nextafter(self.x, -np.inf)
self._call = self.__class__._call_previousnext
elif kind == 'next':
self._side = 'right'
self._ind = 1
# Move x by one floating point value to the right
self._x_shift = np.nextafter(self.x, np.inf)
self._call = self.__class__._call_previousnext
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
mask = np.isnan(self.x)
if mask.any():
sx = self.x[~mask]
if sx.size == 0:
raise ValueError("`x` array is all-nan")
xx = np.linspace(np.nanmin(self.x),
np.nanmax(self.x),
len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
"""The fill value."""
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data, the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbor interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbor
x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_previousnext(self, x_new):
"""Use previous/next neighbor of x_new, y_new = f(x_new)."""
# 1. Get index of left/right value
x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
# 2. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(1-self._ind,
len(self.x)-self._ind).astype(intp)
# 3. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices+self._ind-1]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
        # or return boolean arrays indicating which values are out of bounds.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
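# --- Illustrative usage sketch (editor's addition, not part of the upstream file) ---
# Out-of-range handling in interp1d as defined above: a (below, above) fill_value
# pair versus fill_value="extrapolate".
if __name__ == "__main__":
    import numpy as np
    x_demo = np.arange(5, dtype=float)
    y_demo = 2.0 * x_demo
    f_fill = interp1d(x_demo, y_demo, bounds_error=False, fill_value=(-1.0, 99.0))
    print(f_fill([-1.0, 2.5, 10.0]))     # [-1.  5. 99.]: below-fill, interpolated, above-fill
    f_extrap = interp1d(x_demo, y_demo, fill_value="extrapolate")
    print(f_extrap([-1.0, 10.0]))        # [-2. 20.]: linear extrapolation on both sides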
class _PPolyBase:
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if self.c.ndim < 2:
raise ValueError("Coefficients array must be at least "
"2-dimensional.")
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("axis=%s must be between 0 and %s" %
(axis, self.c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
``self.x`` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
``self.x`` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("Shapes of x {} and c {} are incompatible"
.format(x.shape, c.shape))
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("Shapes of c {} and self.c {} are incompatible"
.format(c.shape, self.c.shape))
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
                raise ValueError("`x` is in a different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
                raise ValueError("`x` is neither on the left nor on the right "
                                 "of `self.x`.")
else:
if not x[-1] <= x[0]:
                raise ValueError("`x` is in a different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
                raise ValueError("`x` is neither on the left nor on the right "
                                 "of `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals.
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-D array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e., compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
            Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e., compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
            Piecewise polynomial of order k2 = k + nu representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
        continuously differentiable to order nu-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
        Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.solve()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in NumPy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
        Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in range(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if not isinstance(bp, BPoly):
raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
"Got %s instead." % type(bp))
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
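# --- Illustrative usage sketch (editor's addition, not part of the upstream file) ---
# Building a PPoly (defined above) directly from local power-basis coefficients
# and exercising derivative(), integrate() and roots().
if __name__ == "__main__":
    import numpy as np
    # Two intervals [0, 1] and [1, 2]; on each, S = c[0]*(x - x[i]) + c[1].
    c_demo = np.array([[1.0, -1.0],    # slopes
                       [0.0, 1.0]])    # constants
    x_demo = np.array([0.0, 1.0, 2.0])
    p_demo = PPoly(c_demo, x_demo)     # piecewise-linear "tent" peaking at x = 1
    print(p_demo([0.5, 1.0, 1.5]))     # [0.5 1.  0.5]
    print(p_demo.derivative()([0.5, 1.5]))  # [ 1. -1.]
    print(p_demo.integrate(0, 2))      # 1.0, the area of the tent
    print(p_demo.roots())              # roots at the end points, ~[0. 2.]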
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-D array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature,
see for example [1]_ [2]_ [3]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e., compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e., compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
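        Examples
        --------
        A minimal round-trip sketch (the power-basis coefficients are
        illustrative):
        >>> import numpy as np
        >>> from scipy.interpolate import PPoly, BPoly
        >>> pp = PPoly([[1.], [2.]], [0, 1])    # p(x) = x + 2 on [0, 1]
        >>> bp = BPoly.from_power_basis(pp)
        >>> bool(np.allclose(bp(0.5), pp(0.5)))
        True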
"""
if not isinstance(pp, PPoly):
raise TypeError(".from_power_basis only accepts PPoly instances. "
"Got %s instead." % type(pp))
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1-D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
``x = 1`` and ``x = 2``.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError as e:
raise ValueError(
"Using a 1-D array for y? Please .reshape(-1, 1)."
) from e
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (int, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on ``[xa, xb]`` and having the values and derivatives at the
        endpoints `xa` and `xb` as specified by `ya` and `yb`.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of `ya` and `yb` are `na` and `nb`, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at `xa`. `ya[0]` is the value of the function, and
`ya[i]` for ``i > 0`` is the value of the ``i``th derivative.
yb : array_like
Derivatives at `xb`.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts from life of Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At ``x = xb`` it's the same with ``a = n - q``.
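        Examples
        --------
        A small sketch (this helper is private; the end values below are
        illustrative):
        >>> from scipy.interpolate import BPoly
        >>> c = BPoly._construct_from_derivatives(0., 1., [1.], [3.])
        >>> float(c[0]), float(c[-1])   # end coefficients equal the end values
        (1.0, 3.0)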
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('Shapes of ya {} and yb {} are incompatible'
.format(ya.shape, yb.shape))
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1-D
d : integer
Returns
-------
array
coefficient array, 1-D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
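        Examples
        --------
        A small sketch: the constant-1 polynomial keeps all coefficients
        equal to one when its degree is raised (inputs are illustrative):
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly
        >>> BPoly._raise_degree(np.array([1., 1.]), 1).tolist()
        [1.0, 1.0, 1.0]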
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly:
"""
Piecewise tensor product polynomial
The value at point ``xp = (x', y', z', ...)`` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
derivative
antiderivative
integrate
integrate_1d
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
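    Examples
    --------
    A minimal 2-D sketch: a piecewise-constant polynomial equal to 1
    everywhere (shapes follow the ``(k0, k1, m0, m1)`` convention above;
    the values are for illustration only):
    >>> import numpy as np
    >>> from scipy.interpolate import NdPPoly
    >>> x = (np.array([0., 1., 2.]), np.array([0., 1.]))
    >>> c = np.ones((1, 1, 2, 1))
    >>> p = NdPPoly(c, x)
    >>> float(p([[0.5, 0.5]])[0])
    1.0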
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1-D derivative along a selected dimension in-place
May result to non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[tuple(sl)]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[tuple(sl)]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1-D antiderivative along a selected dimension
May result to non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
        pp : NdPPoly
            Piecewise polynomial of orders (k[0] + nu[0], ..., k[n] + nu[n])
            representing the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1-D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1D, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1-D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
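        Examples
        --------
        Continuing the constant-1 sketch from the class docstring, the
        integral over ``[0, 2] x [0, 1]`` equals the volume of that box
        (values are illustrative):
        >>> import numpy as np
        >>> from scipy.interpolate import NdPPoly
        >>> p = NdPPoly(np.ones((1, 1, 2, 1)),
        ...             (np.array([0., 1., 2.]), np.array([0., 1.])))
        >>> float(p.integrate(((0, 2), (0, 1))))
        2.0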
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator:
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbor interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
If any of `points` have a dimension of size 1, linear interpolation will
return an array of `nan` values. Nearest-neighbor interpolation will work
as usual in this case.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3-D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> xg, yg ,zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
>>> data = f(xg, yg, zg)
``data`` is now a 3-D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents a edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self.values[tuple(idx_res)]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a regular 3-D grid:
>>> from scipy.interpolate import interpn
>>> def value_func_3d(x, y, z):
... return 2 * x + 3 * y - z
>>> x = np.linspace(0, 4, 5)
>>> y = np.linspace(0, 5, 6)
>>> z = np.linspace(0, 6, 7)
>>> points = (x, y, z)
>>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
Evaluate the interpolating function at a point
>>> point = np.array([2.21, 3.12, 1.15])
>>> print(interpn(points, values, point))
[12.63]
See also
--------
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method splinef2d can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method splinef2d does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method splinef2d can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
if bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class _ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("_ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in range(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
| bsd-3-clause |
sfepy/sfepy | script/gen_lobatto1d_c.py | 5 | 7664 | #!/usr/bin/env python
"""
Generate lobatto1d.c and lobatto1d.h files.
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
import os
from argparse import ArgumentParser
import sympy as sm
import numpy as nm
import matplotlib.pyplot as plt
from sfepy import top_dir
from sfepy.base.ioutils import InDir
hdef = 'float64 %s(float64 x);\n'
cdef = """
float64 %s(float64 x)
{
return(%s);
}
"""
fun_list = """
const fun %s[%d] = {%s};
"""
def gen_lobatto(max_order):
assert max_order > 2
x = sm.symbols('x')
lobs = [0, 1]
lobs[0] = (1 - x) / 2
lobs[1] = (1 + x) / 2
dlobs = [lob.diff('x') for lob in lobs]
legs = [sm.legendre(0, 'y')]
clegs = [sm.ccode(legs[0])]
dlegs = [sm.legendre(0, 'y').diff('y')]
cdlegs = [sm.ccode(dlegs[0])]
clobs = [sm.ccode(lob) for lob in lobs]
cdlobs = [sm.ccode(dlob) for dlob in dlobs]
denoms = [] # for lobs.
for ii in range(2, max_order + 1):
coef = sm.sympify('sqrt(2 * (2 * %s - 1)) / 2' % ii)
leg = sm.legendre(ii - 1, 'y')
pleg = leg.as_poly()
coefs = pleg.all_coeffs()
denom = max(sm.denom(val) for val in coefs)
cleg = sm.ccode(sm.horner(leg*denom)/denom)
dleg = leg.diff('y')
cdleg = sm.ccode(sm.horner(dleg*denom)/denom)
lob = sm.simplify(coef * sm.integrate(leg, ('y', -1, x)))
lobnc = sm.simplify(sm.integrate(leg, ('y', -1, x)))
plobnc = lobnc.as_poly()
coefs = plobnc.all_coeffs()
denom = sm.denom(coef) * max(sm.denom(val) for val in coefs)
clob = sm.ccode(sm.horner(lob*denom)/denom)
dlob = lob.diff('x')
cdlob = sm.ccode(sm.horner(dlob*denom)/denom)
legs.append(leg)
clegs.append(cleg)
dlegs.append(dleg)
cdlegs.append(cdleg)
lobs.append(lob)
clobs.append(clob)
dlobs.append(dlob)
cdlobs.append(cdlob)
denoms.append(denom)
coef = sm.sympify('sqrt(2 * (2 * %s - 1)) / 2' % (max_order + 1))
leg = sm.legendre(max_order, 'y')
pleg = leg.as_poly()
coefs = pleg.all_coeffs()
denom = max(sm.denom(val) for val in coefs)
cleg = sm.ccode(sm.horner(leg*denom)/denom)
dleg = leg.diff('y')
cdleg = sm.ccode(sm.horner(dleg*denom)/denom)
legs.append(leg)
clegs.append(cleg)
dlegs.append(dleg)
cdlegs.append(cdleg)
kerns = []
ckerns = []
dkerns = []
cdkerns = []
for ii, lob in enumerate(lobs[2:]):
kern = sm.simplify(lob / (lobs[0] * lobs[1]))
dkern = kern.diff('x')
denom = denoms[ii] / 4
ckern = sm.ccode(sm.horner(kern*denom)/denom)
cdkern = sm.ccode(sm.horner(dkern*denom)/denom)
kerns.append(kern)
ckerns.append(ckern)
dkerns.append(dkern)
cdkerns.append(cdkern)
return (legs, clegs, dlegs, cdlegs,
lobs, clobs, dlobs, cdlobs,
kerns, ckerns, dkerns, cdkerns,
denoms)
def plot_polys(fig, polys, var_name='x'):
plt.figure(fig)
plt.clf()
x = sm.symbols(var_name)
vx = nm.linspace(-1, 1, 100)
for ii, poly in enumerate(polys):
print(ii)
print(poly)
print(poly.as_poly(x).all_coeffs())
vy = [float(poly.subs(x, xx)) for xx in vx]
plt.plot(vx, vy)
def append_declarations(out, cpolys, comment, cvar_name, shift=0):
names = []
out.append('\n// %s functions.\n' % comment)
for ii, cpoly in enumerate(cpolys):
name = '%s_%03d' % (cvar_name, ii + shift)
function = hdef % name
out.append(function)
names.append(name)
return names
def append_polys(out, cpolys, comment, cvar_name, var_name='x', shift=0):
names = []
out.append('\n// %s functions.\n' % comment)
for ii, cpoly in enumerate(cpolys):
name = '%s_%03d' % (cvar_name, ii + shift)
function = cdef % (name, cpoly.replace(var_name, 'x'))
out.append(function)
names.append(name)
return names
def append_lists(out, names, length):
args = ', '.join(['&%s' % name for name in names])
name = names[0][:-4]
_list = fun_list % (name, length, args)
out.append(_list)
helps = {
'max_order' :
'maximum order of polynomials [default: %(default)s]',
'plot' :
'plot polynomials',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-m', '--max-order', metavar='order', type=int,
action='store', dest='max_order',
default=10, help=helps['max_order'])
parser.add_argument('--plot',
action='store_true', dest='plot',
default=False, help=helps['plot'])
options = parser.parse_args()
max_order = options.max_order
(legs, clegs, dlegs, cdlegs,
lobs, clobs, dlobs, cdlobs,
kerns, ckerns, dkerns, cdkerns,
denoms) = gen_lobatto(max_order)
if options.plot:
plot_polys(1, lobs)
plot_polys(11, dlobs)
plot_polys(2, kerns)
plot_polys(21, dkerns)
plot_polys(3, legs, var_name='y')
plot_polys(31, dlegs, var_name='y')
plt.show()
indir = InDir(os.path.join(top_dir, 'sfepy/discrete/fem/extmods/'))
fd = open(indir('lobatto1d_template.h'), 'r')
template = fd.read()
    fd.close()
fd = open(indir('lobatto1d.h'), 'w')
out = []
append_declarations(out, clobs, 'Lobatto', 'lobatto')
append_declarations(out, cdlobs, 'Derivatives of Lobatto', 'd_lobatto')
append_declarations(out, ckerns, 'Kernel', 'kernel',
shift=2)
append_declarations(out, cdkerns, 'Derivatives of kernel', 'd_kernel',
shift=2)
append_declarations(out, clegs, 'Legendre', 'legendre')
append_declarations(out, cdlegs, 'Derivatives of Legendre', 'd_legendre')
fd.write(template.replace('// REPLACE_TEXT', ''.join(out)))
fd.close()
fd = open(indir('lobatto1d_template.c'), 'r')
template = fd.read()
fd.close()
fd = open(indir('lobatto1d.c'), 'w')
out = []
names_lobatto = append_polys(out, clobs,
'Lobatto', 'lobatto')
names_d_lobatto = append_polys(out, cdlobs,
'Derivatives of Lobatto', 'd_lobatto')
names_kernel = append_polys(out, ckerns,
'Kernel', 'kernel',
shift=2)
names_d_kernel = append_polys(out, cdkerns,
'Derivatives of kernel', 'd_kernel',
shift=2)
names_legendre = append_polys(out, clegs,
'Legendre', 'legendre',
var_name='y')
names_d_legendre = append_polys(out, cdlegs,
'Derivatives of Legendre', 'd_legendre',
var_name='y')
out.append('\n// Lists of functions.\n')
out.append('\nconst int32 max_order = %d;\n' % max_order)
append_lists(out, names_lobatto, max_order + 1)
append_lists(out, names_d_lobatto, max_order + 1)
append_lists(out, names_kernel, max_order - 1)
append_lists(out, names_d_kernel, max_order - 1)
append_lists(out, names_legendre, max_order + 1)
append_lists(out, names_d_legendre, max_order + 1)
fd.write(template.replace('// REPLACE_TEXT', ''.join(out)))
fd.close()
if __name__ == '__main__':
main()
| bsd-3-clause |
biocore/qiime | scripts/make_2d_plots.py | 15 | 12125 | #!/usr/bin/env python
# File created on 09 Feb 2010
# file make_2d_plots.py
from __future__ import division
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jesse Stombaugh", "Jose Antonio Navas Molina", "John Chase"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
from matplotlib import use
use('Agg', warn=False)
import matplotlib
import re
from qiime.util import parse_command_line_parameters, get_options_lookup
from qiime.util import make_option
from qiime.make_2d_plots import generate_2d_plots, get_coord
from qiime.parse import parse_coords, group_by_field, group_by_fields
import shutil
import os
from qiime.colors import sample_color_prefs_and_map_data_from_options
from qiime.util import get_qiime_project_dir, load_pcoa_files
from qiime.make_2d_plots import get_coord
from tempfile import mkdtemp
options_lookup = get_options_lookup()
# make_2d_plots.py
script_info = {}
script_info['brief_description'] = """Make 2D PCoA Plots"""
script_info[
'script_description'] = """This script generates 2D PCoA plots using the principal coordinates file generated by performing beta diversity measures of an OTU table."""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Default Example:""",
"If you just want to use the default output, you can supply the principal "
"coordinates file (i.e., resulting file from principal_coordinates.py), where "
"the default coloring will be based on the SampleID as follows:",
"""%prog -i unweighted_unifrac_pc.txt -m Fasting_Map.txt"""))
script_info['script_usage'].append(("""Output Directory Usage:""",
"If you want to give an specific output directory (e.g. \"2d_plots\"), use the "
"following code.",
"""%prog -i unweighted_unifrac_pc.txt -m Fasting_Map.txt -o 2d_plots/"""))
script_info['script_usage'].append(("""Mapping File Usage:""",
"Additionally, the user can supply their mapping file ('-m') and a specific "
"category to color by ('-b') or any combination of categories. When using the "
"-b option, the user can specify the coloring for multiple mapping labels, where "
"each mapping label is separated by a comma, for example: "
"-b \'mapping_column1,mapping_column2\'. The user can also combine mapping "
"labels and color by the combined label that is created by inserting an \'&&\' "
"between the input columns, for example: -b \'mapping_column1&&mapping_column2\'."
"If the user wants to color by specific mapping labels, they can use the "
"following code:",
"""%prog -i unweighted_unifrac_pc.txt -m Fasting_Map.txt -b 'Treatment'"""))
script_info['script_usage'].append(("""Scree plot Usage:""",
"A scree plot can tell you how many axes are likely to be important and help "
"determine how many 'real' underlying gradients there might be in your data as "
"well as their relative 'strength'. If you want to generate a scree plot, use "
"the following code.",
"""%prog -i unweighted_unifrac_pc.txt -m Fasting_Map.txt --scree"""))
script_info['script_usage'].append(("""Color by all categories""",
"If the user would like to color all categories in their metadata mapping file, "
"they should not pass -b. Color by all is the default behavior.",
"""%prog -i unweighted_unifrac_pc.txt -m Fasting_Map.txt"""))
script_info['script_usage'].append(("""Prefs File:""",
"The user can supply a prefs file to color by, as follows:",
"""%prog -i unweighted_unifrac_pc.txt -m Fasting_Map.txt -p prefs.txt"""))
script_info[
'script_usage'].append(("""Jackknifed Principal Coordinates (w/ confidence intervals):""",
"If you have created jackknifed PCoA files, you can pass the folder containing "
"those files, instead of a single file. The user can also specify the opacity "
"of the ellipses around each point '--ellipsoid_opacity', which is a value from "
"0-1. Currently there are two metrics '--ellipsoid_method' that can be used for "
"generating the ellipsoids, which are 'IQR' and 'sdev'. The user can specify all "
"of these options as follows:",
"""%prog -i pcoa/ -m Fasting_Map.txt -b 'Treatment&&DOB' --ellipsoid_opacity=0.5 --ellipsoid_method=IQR"""))
script_info[
'output_description'] = """This script generates an output folder, which contains several files. To best view the 2D plots, it is recommended that the user views the _pcoa_2D.html file."""
script_info['required_options'] = [
make_option('-i', '--coord_fname',
help='Input principal coordinates filepath (i.e.,' +
' resulting file from principal_coordinates.py). Alternatively,' +
' a directory containing multiple principal coordinates files for' +
' jackknifed PCoA results.',
type='existing_path'),
make_option('-m', '--map_fname', dest='map_fname',
help='Input metadata mapping filepath',
type='existing_filepath')
]
script_info['optional_options'] = [
make_option('-b', '--colorby', dest='colorby', type='string',
help='Comma-separated list categories metadata categories' +
' (column headers) ' +
'to color by in the plots. The categories must match the name of a ' +
'column header in the mapping file exactly. Multiple categories ' +
'can be list by comma separating them without spaces. The user can ' +
'also combine columns in the mapping file by separating the ' +
'categories by "&&" without spaces. [default=color by all]'),
make_option('-p', '--prefs_path',
help='Input user-generated preferences filepath. NOTE: This is a' +
' file with a dictionary containing preferences for the analysis.' +
' [default: %default]',
type='existing_filepath'),
make_option('-k', '--background_color',
help='Background color to use in the plots. [default: %default]',
default='white', type='choice', choices=['black', 'white'],),
make_option('--ellipsoid_opacity',
help='Used only when plotting ellipsoids for jackknifed' +
' beta diversity (i.e. using a directory of coord files' +
' instead of a single coord file). The valid range is between 0-1.' +
' 0 produces completely transparent (invisible) ellipsoids' +
' and 1 produces completely opaque ellipsoids.' +
' [default=%default]',
default=0.33, type='float'),
make_option('--ellipsoid_method',
help='Used only when plotting ellipsoids for jackknifed' +
' beta diversity (i.e. using a directory of coord files' +
' instead of a single coord file). Valid values are "IQR" and' +
' "sdev". [default=%default]', default="IQR",
type="choice", choices=["IQR", "sdev"]),
make_option('--master_pcoa',
help='Used only when plotting ellipsoids for jackknifed beta diversity' +
' (i.e. using a directory of coord files' +
' instead of a single coord file). These coordinates will be the' +
' center of each ellipisoid. [default: %default; arbitrarily chosen' +
' PC matrix will define the center point]', default=None,
type='existing_filepath'),
make_option(
'--scree',
action='store_true',
help='Generate the scree plot [default: %default]',
default=False),
make_option('--pct_variation_below_one',
action="store_true",
help='Allow the percent variation explained by the axes to be below one. '
'The default behaivor is to multiply by 100 all values if PC1 is < 1.0 '
'[default: %default]', default=False),
options_lookup['output_dir']
]
script_info['option_label'] = {'coord_fname': 'Principal coordinates filepath',
'map_fname': 'QIIME-formatted mapping filepath',
'colorby': 'Colorby category',
'prefs_path': 'Preferences filepath',
'background_color': 'Background color',
'ellipsoid_opacity': 'Ellipsoid opacity',
'ellipsoid_method': 'Ellipsoid method',
'master_pcoa':
'Master principal coordinates filepath',
'output_dir': 'Output directory'}
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
data = {}
prefs, data, background_color, label_color, ball_scale, arrow_colors = \
sample_color_prefs_and_map_data_from_options(opts)
data['ellipsoid_method'] = opts.ellipsoid_method
if 0.00 <= opts.ellipsoid_opacity <= 1.00:
data['alpha'] = opts.ellipsoid_opacity
else:
raise ValueError('The opacity must be a value between 0 and 1!')
# Open and get coord data
if os.path.isdir(opts.coord_fname) and opts.master_pcoa:
data['coord'], data['support_pcoas'] = load_pcoa_files(
opts.coord_fname)
data['coord'] = get_coord(opts.master_pcoa)
elif os.path.isdir(opts.coord_fname):
data['coord'], data['support_pcoas'] = load_pcoa_files(
opts.coord_fname)
else:
data['coord'] = get_coord(opts.coord_fname)
filepath = opts.coord_fname
basename, extension = os.path.splitext(filepath)
filename = '%s_2D_PCoA_plots' % (basename)
# obtaining where the files live so they can be copied
qiime_dir = get_qiime_project_dir()
js_path = os.path.join(qiime_dir, 'qiime', 'support_files', 'js')
if opts.output_dir:
if os.path.exists(opts.output_dir):
dir_path = opts.output_dir
else:
try:
os.mkdir(opts.output_dir)
dir_path = opts.output_dir
except OSError:
pass
else:
dir_path = './'
html_dir_path = dir_path
data_dir_path = mkdtemp(dir=dir_path)
try:
os.mkdir(data_dir_path)
except OSError:
pass
js_dir_path = os.path.join(html_dir_path, 'js')
try:
os.mkdir(js_dir_path)
except OSError:
pass
shutil.copyfile(os.path.join(js_path, 'overlib.js'),
os.path.join(js_dir_path, 'overlib.js'))
try:
action = generate_2d_plots
except NameError:
action = None
# Place this outside try/except so we don't mask NameError in action
if action:
action(
prefs, data, html_dir_path, data_dir_path, filename, background_color,
label_color, opts.scree, opts.pct_variation_below_one)
if __name__ == "__main__":
main()
| gpl-2.0 |