repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
plotly/plotly.py | packages/python/plotly/plotly/express/__init__.py | 1 | 2047 | """
`plotly.express` is a terse, consistent, high-level wrapper around `plotly.graph_objects`
for rapid data exploration and figure generation. Learn more at https://plotly.express/
"""
from __future__ import absolute_import
from plotly import optional_imports
pd = optional_imports.get_module("pandas")
if pd is None:
raise ImportError(
"""\
Plotly express requires pandas to be installed."""
)
from ._imshow import imshow
from ._chart_types import ( # noqa: F401
scatter,
scatter_3d,
scatter_polar,
scatter_ternary,
scatter_mapbox,
scatter_geo,
line,
line_3d,
line_polar,
line_ternary,
line_mapbox,
line_geo,
area,
bar,
timeline,
bar_polar,
violin,
box,
strip,
histogram,
scatter_matrix,
parallel_coordinates,
parallel_categories,
choropleth,
density_contour,
density_heatmap,
pie,
sunburst,
treemap,
icicle,
funnel,
funnel_area,
choropleth_mapbox,
density_mapbox,
)
from ._core import ( # noqa: F401
set_mapbox_access_token,
defaults,
get_trendline_results,
NO_COLOR,
)
from ._special_inputs import IdentityMap, Constant, Range # noqa: F401
from . import data, colors # noqa: F401
__all__ = [
"scatter",
"scatter_3d",
"scatter_polar",
"scatter_ternary",
"scatter_mapbox",
"scatter_geo",
"scatter_matrix",
"density_contour",
"density_heatmap",
"density_mapbox",
"line",
"line_3d",
"line_polar",
"line_ternary",
"line_mapbox",
"line_geo",
"parallel_coordinates",
"parallel_categories",
"area",
"bar",
"timeline",
"bar_polar",
"violin",
"box",
"strip",
"histogram",
"choropleth",
"choropleth_mapbox",
"pie",
"sunburst",
"treemap",
"icicle",
"funnel",
"funnel_area",
"imshow",
"data",
"colors",
"set_mapbox_access_token",
"get_trendline_results",
"IdentityMap",
"Constant",
"Range",
"NO_COLOR",
]
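# A minimal usage sketch of the high-level API described in the module
# docstring (illustrative only; it assumes the bundled iris sample dataset
# and its column names):
#
#   import plotly.express as px
#   df = px.data.iris()
#   fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species")
#   fig.show()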
| mit |
jniediek/combinato | combinato/cluster/cluster.py | 1 | 10242 | # JN 2015-01-11
"""
main program for spike sorting
"""
from __future__ import print_function, division, absolute_import
import os
import numpy as np
# pylint: disable=E1101
from .. import SortingManager, SessionManager, options
from time import strftime
from getpass import getuser
import matplotlib.pyplot as mpl
from .wave_features import wavelet_features
from .select_features import select_features
from .define_clusters import define_clusters
from .cluster_features import cluster_features, read_results
from .dist import template_match
from .artifacts import find_artifacts
from .plot_temp import plot_temperatures
USER = getuser()
LOG_FNAME = 'log.txt'
FIRST_MATCH_FACTOR = options['FirstMatchFactor']
def features_to_index(features, folder, name, overwrite=True):
"""
wrapper: run clustering (or read cached results) and return cluster indices, tree and used points
"""
clu = None
if not overwrite:
try:
clu, tree = read_results(folder, name)
print('Read clustering results from ' + folder)
except IOError: # as error:
print('Starting clustering ')
# + error.strerror + ': ' + error.filename)
overwrite = True
if clu is not None:
if features.shape[0] != clu.shape[1] - 2:
print('Read outdated clustering, restarting')
overwrite = True
if overwrite:
feat_idx = select_features(features)
print('Clustering data in {}/{}'.format(folder, name))
cluster_features(features[:, feat_idx], folder, name)
now = strftime('%Y-%m-%d_%H-%M-%S')
log_fname = os.path.join(folder, LOG_FNAME)
with open(log_fname, 'a') as fid_done:
fid_done.write('{} {} ran {}\n'.format(now, USER, name))
fid_done.close()
clu, tree = read_results(folder, name)
idx, tree, used_points = define_clusters(clu, tree)
return idx, tree, used_points
def cluster_step(features, folder, sub_name, overwrite):
"""
one step in clustering
"""
res_idx, tree, used_points = features_to_index(features,
folder,
sub_name,
overwrite)
# save temperature plot here if wanted
if options['plotTemps']:
temp_fig = plot_temperatures(tree, used_points)
temp_fname = os.path.join(folder, 'temp_' + sub_name + '.png')
temp_fig.savefig(temp_fname)
mpl.close(temp_fig)
if options['Debug']:
print('Cluster step {} returned'.format(sub_name))
for clid in np.unique(res_idx):
print('{}: {} spikes'.format(clid, (res_idx == clid).sum()))
return res_idx
def iterative_sorter(features, spikes, n_iterations, name, overwrite=True):
"""
name is used to generate temporary filenames
"""
idx = np.zeros(features.shape[0], np.uint16)
match_idx = np.zeros(features.shape[0], bool)
for i in range(n_iterations):
# input to clustering are the spikes that have no index so far
sub_idx = idx == 0
sub_name = 'sort_' + str(i)
if sub_idx.sum() < options['MinInputSize']:
if options['Debug']:
print('Stopping iteration, {} spikes left'
.format(sub_idx.sum()))
break
if options['Debug']:
print('Clustering {} spikes'.format(sub_idx.sum()))
# res_idx contains a number for each cluster generated from clustering
res_idx = cluster_step(features[sub_idx], name, sub_name, overwrite)
if options['Debug']:
print('Iteration {}, new classes: {}'.
format(i, np.unique(res_idx)))
print('Iteration {}, old classes: {}'.format(i, np.unique(idx)))
clustered_idx = res_idx > 0
prev_idx_max = idx.max()
res_idx[clustered_idx] += prev_idx_max
idx[sub_idx] = res_idx
# now idx contains the new spike numbers
# feed new, sufficiently big clusters into clustering again
# (to reduce under-clustering)
if options['ReclusterClusters']:
clids = np.unique(res_idx[clustered_idx])
for clid in clids:
recluster_idx = idx == clid
cluster_size = recluster_idx.sum()
if cluster_size < options['MinInputSizeRecluster']:
if options['Debug']:
print('Not reclustering cluster {} ({} spikes)'
.format(clid, cluster_size))
continue
else:
if options['Debug']:
print('Reclustering cluster {} ({} spikes)'
.format(clid, cluster_size))
sub_sub_name = '{}_{:02d}'.format(sub_name, clid)
recluster_res_idx = cluster_step(features[recluster_idx],
name, sub_sub_name, overwrite)
# make sure to increase the cluster numbers enough
biggest_clid = idx.max()
recluster_res_idx[recluster_res_idx != 0] += biggest_clid
idx[recluster_idx] = recluster_res_idx
# conservative template matching here
template_match(spikes, idx, match_idx, FIRST_MATCH_FACTOR)
return idx, match_idx
def sort_spikes(spikes, folder, overwrite=False, sign='pos'):
"""
organize one sorting run: compute features, cluster iteratively, then mark artifact classes
"""
n_iterations = options['RecursiveDepth']
if options['Debug']:
print('Recursive depth is {}.'.format(n_iterations))
# it is suboptimal that we calculate the features
# even when reading clusters from disk
all_features = wavelet_features(spikes)
# sorting includes template match
sorted_idx, match_idx = iterative_sorter(all_features, spikes,
n_iterations, folder,
overwrite=overwrite)
# identify artifact clusters
class_ids = np.unique(sorted_idx)
if options['MarkArtifactClasses']:
invert = True if sign == 'neg' else False
_, artifact_ids = find_artifacts(spikes, sorted_idx, class_ids, invert)
else:
artifact_ids = []
return sorted_idx, match_idx, artifact_ids
def main(data_fname, session_fname, sign, overwrite=False):
"""
sort spikes from given session
"""
sort_man = SortingManager(data_fname)
session = SessionManager(session_fname)
idx = session.index
spikes = sort_man.get_data_by_name_and_index('spikes', idx, sign)
sort_idx, match_idx, artifact_ids =\
sort_spikes(spikes, session.session_dir,
overwrite=overwrite, sign=sign)
all_ids = np.unique(sort_idx)
artifact_scores = np.zeros((len(all_ids), 2), np.uint8)
artifact_scores[:, 0] = all_ids
for cl_id in all_ids:
idx = artifact_scores[:, 0] == cl_id
artifact_score = 1 if cl_id in artifact_ids else 0
artifact_scores[idx, 1] = artifact_score
session.update_classes(sort_idx)
session.update_sorting_data(match_idx, artifact_scores)
session.h5file.close()
def sort_helper(args):
"""
usual multiprocessing helper, used to unpack the argument tuple
"""
main(args[0], args[2], args[1], options['overwrite'])
def write_options(fname='css-cluster-log.txt'):
"""
save options to log file
"""
print('Writing options to file {}'.format(fname))
msg = strftime('%Y-%m-%d_%H-%M-%S') + ' ' + USER + '\n'
for key in sorted(options.keys()):
if key in ['density_hist_bins', 'cmap']:
continue
msg += '{}: {}\n'.format(key, options[key])
msg += 60 * '-' + '\n'
with open(fname, 'a') as fid:
fid.write(msg)
fid.close()
def test_joblist(joblist):
"""
simple test to detect whether the same job is
requested more than once
"""
unique_joblist = set(joblist)
if len(joblist) != len(unique_joblist):
# there are duplicates!
counter = dict()
for item in joblist:
if item in counter:
counter[item] += 1
else:
counter[item] = 1
for key, val in counter.items():
if val > 1:
print('Job {} requested {} times'.format(key, val))
raise ValueError('Duplicate jobs requested')
def argument_parser():
"""
standard argument parsing
"""
from argparse import ArgumentParser, FileType, ArgumentError
from multiprocessing import Pool, cpu_count
parser = ArgumentParser('css-cluster',
description='Combinato Spike Sorter. This is the'
' main clustering executable. Specify'
' either a jobfile or datafile and '
'sessions.',
epilog='Johannes Niediek ([email protected])')
parser.add_argument('--jobs', type=FileType('r'))
parser.add_argument('--datafile', nargs=1)
parser.add_argument('--sessions', nargs='+')
parser.add_argument('--single', default=False, action='store_true')
# possibilities:
# 1) jobs is supplied, and neither datafile nor session
# 2) datafile and sessions are supplied
args = parser.parse_args()
if args.jobs is None:
if None in (args.datafile, args.sessions):
raise ArgumentError(args.jobs,
'Specify either jobs or datafile and sessions')
else:
joblist = []
for session in args.sessions:
sign = 'neg' if 'neg' in session else 'pos'
joblist.append([args.datafile[0], sign, session])
else:
jobdata = args.jobs.read().splitlines()
joblist = tuple((tuple(line.split()) for line in jobdata))
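# each whitespace-separated jobfile line is expected to supply the same
# three fields as the command-line path above: datafile, sign, session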
test_joblist(joblist)
n_cores = 1 if args.single else cpu_count() + 1
print('Starting {} jobs with {} workers'.
format(len(joblist), n_cores))
write_options()
if n_cores == 1:
[sort_helper(job) for job in joblist]
else:
pool = Pool(n_cores)
pool.map(sort_helper, joblist)
| mit |
decvalts/cartopy | lib/cartopy/examples/regridding_arrows.py | 4 | 1656 | """
Regridding vectors with quiver
------------------------------
This example demonstrates the regridding functionality in quiver (there exists
equivalent functionality in :meth:`cartopy.mpl.geoaxes.GeoAxes.barbs`).
Regridding can be an effective way of visualising a vector field, particularly
if the data is dense or warped.
"""
__tags__ = ['Vector data']
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
def sample_data(shape=(20, 30)):
"""
Return ``(x, y, u, v, crs)`` of some vector data
computed mathematically. The returned CRS will be a North Polar
Stereographic projection, meaning that the vectors will be unevenly
spaced in a PlateCarree projection.
"""
crs = ccrs.NorthPolarStereo()
scale = 1e7
x = np.linspace(-scale, scale, shape[1])
y = np.linspace(-scale, scale, shape[0])
x2d, y2d = np.meshgrid(x, y)
u = 10 * np.cos(2 * x2d / scale + 3 * y2d / scale)
v = 20 * np.cos(6 * x2d / scale)
return x, y, u, v, crs
def main():
fig = plt.figure(figsize=(8, 10))
x, y, u, v, vector_crs = sample_data(shape=(50, 50))
ax1 = fig.add_subplot(2, 1, 1, projection=ccrs.PlateCarree())
ax1.coastlines('50m')
ax1.set_extent([-45, 55, 20, 80], ccrs.PlateCarree())
ax1.quiver(x, y, u, v, transform=vector_crs)
ax2 = fig.add_subplot(2, 1, 2, projection=ccrs.PlateCarree())
ax2.set_title('The same vector field regridded')
ax2.coastlines('50m')
ax2.set_extent([-45, 55, 20, 80], ccrs.PlateCarree())
ax2.quiver(x, y, u, v, transform=vector_crs, regrid_shape=20)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
magne-max/zipline-ja | tests/finance/test_slippage.py | 1 | 24499 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Unit tests for finance.slippage
'''
import datetime
import pytz
from nose_parameterized import parameterized
import pandas as pd
from pandas.tslib import normalize_date
from zipline.finance.slippage import VolumeShareSlippage
from zipline.protocol import DATASOURCE_TYPE
from zipline.finance.blotter import Order
from zipline.data.data_portal import DataPortal
from zipline.protocol import BarData
from zipline.testing import tmp_bcolz_equity_minute_bar_reader
from zipline.testing.fixtures import (
WithDataPortal,
WithSimParams,
ZiplineTestCase,
)
class SlippageTestCase(WithSimParams, WithDataPortal, ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-05 14:31', tz='utc')
END_DATE = pd.Timestamp('2006-01-05 14:36', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = 'minute'
SIM_PARAMS_EMISSION_RATE = 'daily'
ASSET_FINDER_EQUITY_SIDS = (133,)
ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp('2006-01-05', tz='utc')
ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp('2006-01-07', tz='utc')
minutes = pd.DatetimeIndex(
start=START_DATE,
end=END_DATE - pd.Timedelta('1 minute'),
freq='1min'
)
@classmethod
def make_equity_minute_bar_data(cls):
yield 133, pd.DataFrame(
{
'open': [3.0, 3.0, 3.5, 4.0, 3.5],
'high': [3.15, 3.15, 3.15, 3.15, 3.15],
'low': [2.85, 2.85, 2.85, 2.85, 2.85],
'close': [3.0, 3.5, 4.0, 3.5, 3.0],
'volume': [2000, 2000, 2000, 2000, 2000],
},
index=cls.minutes,
)
@classmethod
def init_class_fixtures(cls):
super(SlippageTestCase, cls).init_class_fixtures()
cls.ASSET133 = cls.env.asset_finder.retrieve_asset(133)
def test_volume_share_slippage(self):
assets = (
(133, pd.DataFrame(
{
'open': [3.00],
'high': [3.15],
'low': [2.85],
'close': [3.00],
'volume': [200],
},
index=[self.minutes[0]],
)),
)
days = pd.date_range(
start=normalize_date(self.minutes[0]),
end=normalize_date(self.minutes[-1])
)
with tmp_bcolz_equity_minute_bar_reader(self.trading_calendar, days, assets) \
as reader:
data_portal = DataPortal(
self.env.asset_finder, self.trading_calendar,
first_trading_day=reader.first_trading_day,
equity_minute_reader=reader,
)
slippage_model = VolumeShareSlippage()
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
sid=self.ASSET133
)
]
bar_data = BarData(data_portal,
lambda: self.minutes[0],
'minute',
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.0001875),
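# 5 shares is 2.5% (the default volume limit) of the bar's 200-share volume;
# the price is presumably 3.0 * (1 + 0.1 * (5/200)**2), assuming the
# default price-impact constant of 0.1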
'dt': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': int(5),
'sid': int(133),
'commission': None,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
# TODO: Make expected_txn a Transaction object and ensure there
# is a __eq__ for that class.
self.assertEquals(expected_txn, txn.__dict__)
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
sid=self.ASSET133
)
]
# Set bar_data to be a minute ahead of last trade.
# Volume share slippage should not execute when there is no trade.
bar_data = BarData(data_portal,
lambda: self.minutes[1],
'minute',
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
def test_orders_limit(self):
slippage_model = VolumeShareSlippage()
slippage_model.data_portal = self.data_portal
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': self.ASSET133,
'limit': 3.5})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[3],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': self.ASSET133,
'limit': 3.5})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[3],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': self.ASSET133,
'limit': 3.6})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[3],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
txn = orders_txns[0][1]
expected_txn = {
'price': float(3.50021875),
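# presumably 3.5 * (1 + 0.1 * (50/2000)**2), assuming the default 0.1
# price-impact constant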
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
# we ordered 100 shares, but default volume slippage only allows
# for 2.5% of the volume. 2.5% * 2000 = 50 shares
'amount': int(50),
'sid': int(133),
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': self.ASSET133,
'limit': 3.5})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[0],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': self.ASSET133,
'limit': 3.5})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[0],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': self.ASSET133,
'limit': 3.4})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[1],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.49978125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-50),
'sid': int(133)
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
STOP_ORDER_CASES = {
# Stop orders can be long/short and have their price greater or
# less than the stop.
#
# A stop being reached is conditional on the order direction.
# Long orders reach the stop when the price is greater than the stop.
# Short orders reach the stop when the price is less than the stop.
#
# Which leads to the following 4 cases:
#
# | long | short |
# | price > stop | | |
# | price < stop | | |
#
# Currently the slippage module acts according to the following table,
# where 'X' represents triggering a transaction
# | long | short |
# | price > stop | | X |
# | price < stop | X | |
#
# However, the following behavior *should* be followed.
#
# | long | short |
# | price > stop | X | |
# | price < stop | | X |
'long | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 4.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 4.0,
'open': 3.5
},
'expected': {
'transaction': {
'price': 4.00025,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': 50,
'sid': 133,
}
}
},
'long | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.6
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 4.0
},
'expected': {
'transaction': None
}
},
'short | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.4
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 3.0
},
'expected': {
'transaction': None
}
},
'short | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.0,
'open': 3.0
},
'expected': {
'transaction': {
'price': 2.9998125,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': -50,
'sid': 133,
}
}
},
}
@parameterized.expand([
(name, case['order'], case['event'], case['expected'])
for name, case in STOP_ORDER_CASES.items()
])
def test_orders_stop(self, name, order_data, event_data, expected):
data = order_data
data['sid'] = self.ASSET133
order = Order(**data)
assets = (
(133, pd.DataFrame(
{
'open': [event_data['open']],
'high': [event_data['high']],
'low': [event_data['low']],
'close': [event_data['close']],
'volume': [event_data['volume']],
},
index=[pd.Timestamp('2006-01-05 14:31', tz='UTC')],
)),
)
days = pd.date_range(
start=normalize_date(self.minutes[0]),
end=normalize_date(self.minutes[-1])
)
with tmp_bcolz_equity_minute_bar_reader(self.trading_calendar, days, assets) \
as reader:
data_portal = DataPortal(
self.env.asset_finder, self.trading_calendar,
first_trading_day=reader.first_trading_day,
equity_minute_reader=reader,
)
slippage_model = VolumeShareSlippage()
try:
dt = pd.Timestamp('2006-01-05 14:31', tz='UTC')
bar_data = BarData(data_portal,
lambda: dt,
'minute',
self.trading_calendar)
_, txn = next(slippage_model.simulate(
bar_data,
self.ASSET133,
[order],
))
except StopIteration:
txn = None
if expected['transaction'] is None:
self.assertIsNone(txn)
else:
self.assertIsNotNone(txn)
for key, value in expected['transaction'].items():
self.assertEquals(value, txn[key])
def test_orders_stop_limit(self):
slippage_model = VolumeShareSlippage()
slippage_model.data_portal = self.data_portal
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': self.ASSET133,
'stop': 4.0,
'limit': 3.0})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[2],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = BarData(self.data_portal,
lambda: self.minutes[3],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': self.ASSET133,
'stop': 4.0,
'limit': 3.5})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[2],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = BarData(self.data_portal,
lambda: self.minutes[3],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': self.ASSET133,
'stop': 4.0,
'limit': 3.6})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[2],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = BarData(self.data_portal,
lambda: self.minutes[3],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.50021875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(50),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': self.ASSET133,
'stop': 3.0,
'limit': 4.0})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[0],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = BarData(self.data_portal,
lambda: self.minutes[1],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': self.ASSET133,
'stop': 3.0,
'limit': 3.5})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[0],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = BarData(self.data_portal,
lambda: self.minutes[1],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': self.ASSET133,
'stop': 3.0,
'limit': 3.4})
]
bar_data = BarData(self.data_portal,
lambda: self.minutes[0],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = BarData(self.data_portal,
lambda: self.minutes[1],
self.sim_params.data_frequency,
self.trading_calendar)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.49978125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-50),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
| apache-2.0 |
pramodh-bn/learn-data-edx | Final/t13.py | 1 | 6049 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 17:11:46 2013
@author: pramodh
"""
import numpy as np
from sklearn import svm, cluster
def getPoints(numberOfPoints):
pointList = np.c_[np.random.uniform(-1,1.00,numberOfPoints),np.random.uniform(-1,1.00,numberOfPoints)]
return pointList
def applyFunction(points):
return np.sign(points[:,1]-points[:,0]+0.25*np.sin(np.pi * points[:,0]))
def doAssignment13():
experiments = 1000
gama = 1.5
numPoints = 100
clf = svm.SVC(C= np.inf , kernel="rbf", coef0=1, gamma=gama)
Ein0 = 0
for i in range(experiments):
X = getPoints(numPoints)
y = applyFunction(X)
clf.fit(X,y)
#print(clf.score(X,y))
if(1-clf.score(X,y)==0):
#print("here")
Ein0 += 1
print(1-float(Ein0)/experiments)
def doAssignment14():
gama = 1.5
numPoints = 100
k = 9
experiments = 20
#km = cluster.KMeans(n_clusters=k, init='k-means++', n_init=5)
km = cluster.KMeans(n_clusters=k)
clf = svm.SVC(C= np.inf , kernel="rbf", coef0=1, gamma=gama)
for j in range(10):
montelist = []
for i in range(experiments):
X = getPoints(numPoints)
y = applyFunction(X)
u = km.fit(X).cluster_centers_
theta = rbfTransform(X, u, gama)
w = np.linalg.lstsq(theta, y)[0]
clf.fit(X,y)
k = [doMonteCarlo(w, clf, 1000, u, gama) for i in range(100)]
ar = np.array(k)
#print(float(len(ar)-np.sum(ar[:,0]-ar[:,1] > 0))/len(ar))
montelist.append(float(len(ar)-np.sum(ar[:,0]-ar[:,1] > 0))/len(ar))
print(np.mean(montelist))
def doAssignment15():
gama = 1.5
numPoints = 100
k = 12
experiments = 20
#km = cluster.KMeans(n_clusters=k, init='k-means++', n_init=5)
km = cluster.KMeans(n_clusters=k)
clf = svm.SVC(C= np.inf , kernel="rbf", coef0=1, gamma=gama)
for j in range(10):
montelist = []
for i in range(experiments):
X = getPoints(numPoints)
y = applyFunction(X)
u = km.fit(X).cluster_centers_
theta = rbfTransform(X, u, gama)
w = np.linalg.lstsq(theta, y)[0]
clf.fit(X,y)
k = [doMonteCarlo(w, clf, 1000, u, gama) for i in range(100)]
ar = np.array(k)
#print(float(len(ar)-np.sum(ar[:,0]-ar[:,1] > 0))/len(ar))
montelist.append(float(len(ar)-np.sum(ar[:,0]-ar[:,1] > 0))/len(ar))
print(np.mean(montelist))
def dist2(x):
return x.T.dot(x)
def rbfTransform(X, U, gamma):
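# design matrix: entry (n, k) = exp(-gamma * ||x_n - u_k||^2);
# a leading bias column of ones is prepended below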
Fi = np.array([[np.exp(-gamma * dist2(x - u)) for u in U] for x in X])
#print("Fi", Fi)
return np.insert(Fi, 0, 1, 1)
def getMisMatches(X, y, weights, centers, gama):
results = []
for x in X:
k = [weights[i] * np.exp(-gama * dist2(x - centers[i-1])) for i in range(1, len(weights))]
#print(sum(k)+weights[0])
results.append(np.sign(sum(k)+weights[0]))
return float(len(X) - np.sum(np.sign(results) == np.sign(y)))/len(X)
def doMonteCarlo(w, clf, numPoints, centers, gama):
X = getPoints(numPoints)
y = applyFunction(X)
eout_hard = 1.0-clf.score(X,y)
eout_reg = getMisMatches(X, y, w, centers, gama)
return (eout_hard, eout_reg)
def doMonteCarloReg(w9, w12, numPoints, centers9, centers12, gama):
X = getPoints(numPoints)
y = applyFunction(X)
return (getMisMatches(X, y, w9, centers9, gama),getMisMatches(X,y,w12,centers12, gama))
if __name__ == '__main__':
gama = 1.5
numPoints = 100
experiments = 100
Ein = []
Eout = []
km9 = cluster.KMeans(n_clusters=9, n_init=1)
km12 = cluster.KMeans(n_clusters=12, n_init=1)
for i in range(experiments):
X = getPoints(numPoints)
y = applyFunction(X)
u9 = km9.fit(X).cluster_centers_
u12 = km12.fit(X).cluster_centers_
theta9 = rbfTransform(X, u9, gama)
theta12 = rbfTransform(X, u12, gama)
w9 = np.linalg.lstsq(theta9, y)[0]
w12 = np.linalg.lstsq(theta12, y)[0]
Ein.append((getMisMatches(X,y,w9, u9, gama), getMisMatches(X,y, w12, u12, gama)))
kl = [doMonteCarloReg(w9, w12, 1000, u9, u12, gama) for i in range(100)]
ar = np.array(kl)
#print(ar)
Eout.append((np.mean(ar[:,0]), np.mean(ar[:,1])))
#print(np.mean(montelist))
#print(Ein)
#print(Eout)
'''for i in range(len(Ein)):
inE = Ein[i]
outE = Eout[i]
strout = ''
if(inE[0] - inE[1] == 0):
strout += 'Same,'
elif(inE[0] - inE[1] > 0):
strout += 'Down, '
else:
strout += 'Up, '
if(outE[0] - outE[1] == 0):
strout += 'Same'
elif(outE[0] - outE[1] > 0):
strout += 'Down'
else:
strout += 'Up'
print(strout)'''
winnerlist = []
for i in range(len(Ein)):
inE = Ein[i]
outE = Eout[i]
E_in_12 = inE[1]
E_in_9 = inE[0]
E_out_12 = outE[1]
E_out_9 = outE[0]
if E_in_12 == E_in_9 and E_out_12 == E_out_9:
winnerlist.append('e')
elif E_in_12 < E_in_9 and E_out_12 > E_out_9:
winnerlist.append('a')
elif E_in_12 > E_in_9 and E_out_12 < E_out_9:
winnerlist.append('b')
elif E_in_12 > E_in_9 and E_out_12 > E_out_9:
winnerlist.append('c')
elif E_in_12 < E_in_9 and E_out_12 < E_out_9:
winnerlist.append('d')
else:
winnerlist.append('f')
print("a", float(winnerlist.count('a'))/len(winnerlist))
print("b", float(winnerlist.count('b'))/len(winnerlist))
print("c", float(winnerlist.count('c'))/len(winnerlist))
print("d", float(winnerlist.count('d'))/len(winnerlist))
print("e", float(winnerlist.count('e'))/len(winnerlist))
print("f", float(winnerlist.count('f'))/len(winnerlist))
| unlicense |
oliverlee/sympy | doc/ext/docscrape_sphinx.py | 51 | 9709 | from __future__ import division, absolute_import, print_function
import sys
import re
import inspect
import textwrap
import pydoc
import sphinx
import collections
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
# Lines that are commented out are used to make the
# autosummary:: table. Since SymPy does not use the
# autosummary:: functionality, it is easiest to just comment it
# out.
# autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
# if param_obj and (pydoc.getdoc(param_obj) or not desc):
# # Referenced object has a docstring
# autosum += [" %s%s" % (prefix, param)]
# else:
others.append((param, param_type, desc))
# if autosum:
# out += ['.. autosummary::']
# if self.class_members_toctree:
# out += [' :toctree:']
# out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', '', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns('Returns')
out += self._str_returns('Yields')
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
frodo4fingers/gimod | mpl/magnetizedgrid.py | 1 | 8227 | from matplotlib.lines import Line2D
# from numpy import allclose
import numpy as np
try:
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import Qt, QPoint
except ImportError:
from PyQt4.QtGui import QCursor, QApplication, QWidget
from PyQt4.QtCore import Qt, QPoint
import time
class MagnetizedGrid():
"""
Everything related to the grid is handled here: the drawing, the
magnetization ('catching') and the scalability.
Note
----
For later: scrolling with Ctrl+G should increase/decrease the density of drawn lines
Todo
----
*[x] make grid
*[x] make grid magnetized
*[ ] make grid free scalable
"""
def __init__(self, parent=None):
"""Initialize the important variables."""
self.figure = parent.figure
self.parent = parent # builder
self.gimod = parent.parent
dot, = self.figure.axis.plot([], [], 'o', c='#ff0000')
self.dot = dot
self.grid()
self.getCanvasHeight()
self.onMotion = self.onMotion
def getCanvasHeight(self):
"""Cache the current canvas height in pixels."""
_, self.height = self.figure.canvas.get_width_height()
def connect(self):
"""Connect the mouse press, motion, release and axes enter/leave callbacks."""
self.cid_p = self.figure.canvas.mpl_connect('button_press_event', self.onPress)
self.cid_m = self.figure.canvas.mpl_connect('motion_notify_event', self.onMotion)
self.cid_ae = self.figure.canvas.mpl_connect('axes_enter_event', self.axesEnter)
self.cid_al = self.figure.canvas.mpl_connect('axes_leave_event', self.axesLeave)
self.cid_r = self.figure.canvas.mpl_connect('button_release_event', self.onRelease)
def axesLeave(self, event):
QApplication.restoreOverrideCursor()
def axesEnter(self, event):
QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
def disconnect(self):
"""Disconnect all registered canvas callbacks."""
try:
self.figure.canvas.mpl_disconnect(self.cid_p)
self.figure.canvas.mpl_disconnect(self.cid_m)
self.figure.canvas.mpl_disconnect(self.cid_ae)
self.figure.canvas.mpl_disconnect(self.cid_al)
self.figure.canvas.mpl_disconnect(self.cid_r)
except AttributeError:
# bc the grid might never be magnetized, thus not having any cid to disconnect
pass
def disable(self):
"""Disable the grid and reset the dot."""
self.figure.axis.grid(False)
self.dot = self.figure.axis.plot([], [], 'o', c='#ff0000')
self.figure.canvas.draw()
def grid(self):
"""Establish the grid and save the grid positions."""
# set the actual grid
self.figure.axis.grid()
self.figure.canvas.draw()
# get the axis ticks. returns a list of x,y-tuple
x_ticks = [i.get_position()[0] for i in
self.figure.axis.get_xticklabels()]
y_ticks = [i.get_position()[1] for i in
self.figure.axis.get_yticklabels()]
# establish all cross sections as pixel position data
self.crossings = []
for x in x_ticks:
for y in y_ticks:
self.crossings.append(tuple(self.figure.axis.transData.transform((x, y))))
def onPress(self, event):
"""Set the magnetic dot if somewhere near the grid ON PRESS."""
if event.inaxes:
try:
dot, _ = self.vicinity(event.xdata, event.ydata)
except TypeError:
# no vicinity at all
self.x_p = event.xdata
self.y_p = event.ydata
else:
# if dot is not None:
self.x_p = dot[0]
self.y_p = dot[1]
self._checkDot()
self.dot.set_data(self.x_p, self.y_p)
self.dot.set_animated(True)
self.figure.canvas.draw()
self.background = self.figure.canvas.copy_from_bbox(self.dot.axes.bbox)
self.dot.axes.draw_artist(self.dot)
self.figure.canvas.blit(self.dot.axes.bbox)
def onMotion(self, event):
"""Snap the indicator dot to nearby grid lines while the mouse moves."""
if event.inaxes: # meaning within the plotting area
self._checkDot()
self.dot.set_animated(True)
self.figure.canvas.draw()
self.background = self.figure.canvas.copy_from_bbox(self.dot.axes.bbox)
self.dot.axes.draw_artist(self.dot)
self.figure.canvas.blit(self.dot.axes.bbox)
try:
dot, color = self.vicinity(event.xdata, event.ydata)
self.x_m = dot[0]
self.y_m = dot[1]
self.dot.set_data(self.x_m, self.y_m)
self.dot.set_color(color)
self.figure.canvas.restore_region(self.background)
self.dot.axes.draw_artist(self.dot)
self.figure.canvas.blit(self.dot.axes.bbox)
except (ValueError, TypeError):
self.x_m = event.xdata
self.y_m = event.ydata
self._checkDot()
self.dot.set_data([], [])
self.dot.axes.draw_artist(self.dot)
self.dot.set_animated(False)
self.background = None
def onRelease(self, event):
"""Set the magnetic dot if somewhere near the grid ON RELEASE."""
if event.inaxes:
try:
dot, _ = self.vicinity(event.xdata, event.ydata)
except TypeError:
# no vicinity at all
self.x_r = event.xdata
self.y_r = event.ydata
self._checkDot()
self.dot.set_data([], [])
else:
# if dot is not None:
self.x_r = dot[0]
self.y_r = dot[1]
self._checkDot()
self.dot.set_data([], [])
self.dot.axes.draw_artist(self.dot)
self.dot.set_animated(False)
self.background = None
self.figure.canvas.draw()
def transform(self, x, y):
"""Transform the given x-y-coordinates to pixel position."""
return self.figure.axis.transData.transform((x, y))
def vicinity(self, x, y, picker=10):
"""
Calculate the distance between a set magnet and the clicked position.
Parameters
----------
x: float
The x-position of the current mouse event
y: float
The y-position of the current mouse event
picker: int [10]
Sets the sensitive distance to snap from
Returns
-------
tuple()
A tuple holding the cartesian x,y-coordinates of the point that will be snapped to
"""
# pixel holds the current cursor postion
pixel = tuple(self.transform(x, y))
# crossings are all possible positions of grid lines
for pos in self.crossings:
dist_x = abs(pos[0] - pixel[0])
dist_y = abs(pos[1] - pixel[1])
color = '#ff0000'
if dist_x <= picker and dist_y <= picker:
self.parent.statusbar.showMessage("Locked X-Y-Axes", 1000)
dot_x = pos[0]
dot_y = pos[1]
color = '#61ff00'
# NOTE: 1 <= dist <= picker allows a 5 pixel radius around each
# joint so that the junctions can be reached better
elif 1 <= dist_x <= picker and dist_y > picker:
self.parent.statusbar.showMessage("Locked X-Axis", 1000)
dot_x = pos[0]
dot_y = pixel[1]
elif 1 <= dist_y <= picker and dist_x > picker:
self.parent.statusbar.showMessage("Locked Y-Axis", 1000)
dot_x = pixel[0]
dot_y = pos[1]
if 'dot_x' in locals() and 'dot_y' in locals():
return self.figure.axis.transData.inverted().transform((dot_x, dot_y)), color
def _checkDot(self):
"""
TODO: why suddenly a list?!
HACK: pretty dirty.. but works for now
"""
if isinstance(self.dot, list):
self.dot = self.dot[0]
if __name__ == '__main__':
pass
| gpl-3.0 |
pypot/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
subodhchhabra/glances | setup.py | 1 | 2949 | #!/usr/bin/env python
import glob
import sys
from setuptools import setup, Command
if sys.version_info < (2, 6) or (3, 0) <= sys.version_info < (3, 3):
print('Glances requires at least Python 2.6 or 3.3 to run.')
sys.exit(1)
class tests(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
import sys
for t in glob.glob('unitest.py'):
ret = subprocess.call([sys.executable, t]) != 0
if ret != 0:
raise SystemExit(ret)
raise SystemExit(0)
def get_data_files():
data_files = [
('share/doc/glances', ['AUTHORS', 'COPYING', 'NEWS', 'README.rst',
'conf/glances.conf']),
('share/man/man1', ['docs/man/glances.1'])
]
return data_files
def get_requires():
requires = ['psutil>=2.0.0']
if sys.platform.startswith('win'):
requires += ['colorconsole']
if sys.version_info[:2] == (2, 6):
requires += ['argparse', 'logutils']
return requires
setup(
name='Glances',
version='2.6.2',
description="A cross-platform curses-based monitoring tool",
long_description=open('README.rst').read(),
author='Nicolas Hennion',
author_email='[email protected]',
url='https://github.com/nicolargo/glances',
license="LGPL",
keywords="cli curses monitoring system",
install_requires=get_requires(),
extras_require={
'WEB': ['bottle', 'requests'],
'SENSORS': ['py3sensors'],
'BATINFO': ['batinfo'],
'SNMP': ['pysnmp'],
'CHART': ['matplotlib'],
'BROWSER': ['zeroconf>=0.17'],
'IP': ['netifaces'],
'RAID': ['pymdstat'],
'DOCKER': ['docker-py'],
'EXPORT': ['influxdb>=1.0.0', 'potsdb', 'statsd', 'pika'],
'ACTION': ['pystache'],
'CPUINFO': ['py-cpuinfo'],
'FOLDERS': ['scandir']
},
packages=['glances'],
include_package_data=True,
data_files=get_data_files(),
cmdclass={'test': tests},
test_suite="unitest.py",
entry_points={"console_scripts": ["glances=glances:main"]},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console :: Curses',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
]
)
| lgpl-3.0 |
dssg/wikienergy | disaggregator/build/pandas/pandas/io/sql.py | 1 | 58024 | # -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date
import warnings
import traceback
import re
import numpy as np
import pandas.lib as lib
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame, Series
from pandas.core.common import isnull
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
from pandas.util.decorators import Appender
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
#------------------------------------------------------------------------------
#--- Helper functions
_SQLALCHEMY_INSTALLED = None
def _is_sqlalchemy_engine(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Engine)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
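# `format` may be a dict of to_datetime keyword arguments, an epoch unit
# ('D', 's', 'ms', 'us', 'ns'), or a strftime-style format string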
if isinstance(format, dict):
return to_datetime(col, **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, coerce=True, unit=format)
elif (issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, coerce=True, unit=format)
else:
return to_datetime(col, coerce=True, format=format)
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Results Iterable
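Examples
--------
A minimal sketch using an in-memory sqlite3 connection (the table name is
an assumption for illustration):
>>> import sqlite3
>>> con = sqlite3.connect(':memory:')
>>> cur = con.execute('CREATE TABLE t (a INTEGER)')
>>> execute('SELECT a FROM t WHERE a = ?', con, params=(1,)).fetchall()
[]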
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
#------------------------------------------------------------------------------
#--- Deprecated tquery and uquery
def _safe_fetch(cur):
try:
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
except Exception as e: # pragma: no cover
excName = e.__class__.__name__
if excName == 'OperationalError':
return []
def tquery(sql, con=None, cur=None, retry=True):
"""
DEPRECATED. Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con, params).fetchall()
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
cur: deprecated, cursor is obtained from connection
Returns
-------
Results Iterable
"""
warnings.warn(
"tquery is deprecated, and will be removed in future versions. "
"You can use ``execute(...).fetchall()`` instead.",
FutureWarning)
cur = execute(sql, con, cur=cur)
result = _safe_fetch(cur)
if con is not None:
try:
cur.close()
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName == 'OperationalError': # pragma: no cover
print('Failed to commit, may need to restart interpreter')
else:
raise
traceback.print_exc()
if retry:
return tquery(sql, con=con, retry=False)
if result and len(result[0]) == 1:
# python 3 compat
result = list(lzip(*result)[0])
elif result is None: # pragma: no cover
result = []
return result
def uquery(sql, con=None, cur=None, retry=True, params=None):
"""
DEPRECATED. Does the same thing as tquery, but instead of returning
results, it returns the number of rows affected. Good for update queries.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con).rowcount
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Number of affected rows
"""
warnings.warn(
"uquery is deprecated, and will be removed in future versions. "
"You can use ``execute(...).rowcount`` instead.",
FutureWarning)
cur = execute(sql, con, cur=cur, params=params)
result = cur.rowcount
try:
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName != 'OperationalError':
raise
traceback.print_exc()
if retry:
print('Looks like your connection failed, reconnecting...')
return uquery(sql, con, retry=False)
return result
#------------------------------------------------------------------------------
#--- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy engine, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy engine
        SQLite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
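    Examples
    --------
    An illustrative sketch (assumes SQLAlchemy is installed and that the
    SQLite file 'example.db' already contains a table named 'data'):
    >>> from sqlalchemy import create_engine
    >>> engine = create_engine('sqlite:///example.db')
    >>> df = read_sql_table('data', engine)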
"""
if not _is_sqlalchemy_engine(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy engines.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name])
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string
SQL query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. psycopg2 uses %(name)s, so use params={'name' : 'value'}
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
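    Examples
    --------
    A minimal, illustrative sketch using an in-memory SQLite database (the
    table, column names and data are made up for demonstration only):
    >>> import sqlite3
    >>> conn = sqlite3.connect(':memory:')
    >>> _ = conn.execute('CREATE TABLE data (id INTEGER, val REAL)')
    >>> _ = conn.execute('INSERT INTO data VALUES (1, 0.5)')
    >>> df = read_sql_query('SELECT * FROM data WHERE id > ?', conn,
    ...                     params=(0,))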
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed or database table name.
con : SQLAlchemy engine or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. psycopg2 uses %(name)s, so use params={'name' : 'value'}
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
    ``read_sql_query`` (provided for backward compatibility); it will delegate
    to the specific function depending on the provided input (database
    table name or SQL query).
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
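    Examples
    --------
    An illustrative sketch (assumes SQLAlchemy is installed and that the
    SQLite file 'example.db' already contains a table named 'data'):
    >>> from sqlalchemy import create_engine
    >>> engine = create_engine('sqlite:///example.db')
    >>> df = read_sql('data', engine)                # reads the table
    >>> df = read_sql('SELECT * FROM data', engine)  # runs the query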
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
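    Examples
    --------
    A minimal, illustrative sketch using the sqlite3 fallback (the table and
    column names are made up for demonstration only):
    >>> import sqlite3
    >>> from pandas import DataFrame
    >>> conn = sqlite3.connect(':memory:')
    >>> df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
    >>> to_sql(df, 'data', conn, if_exists='replace', index=False)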
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
def has_table(table_name, con, flavor='sqlite', schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor: {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
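    Examples
    --------
    A minimal, illustrative sketch using an in-memory SQLite database (the
    table name 'data' is made up for demonstration only):
    >>> import sqlite3
    >>> conn = sqlite3.connect(':memory:')
    >>> has_table('data', conn)
    False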
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
_MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated "
"and will be removed in future versions. "
"MySQL will be further supported with SQLAlchemy engines.")
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
if _is_sqlalchemy_engine(con):
return SQLDatabase(con, schema=schema, meta=meta)
else:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return SQLiteDatabase(con, flavor, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.engine))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isnull(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
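        # number of chunks needed to cover all rows; the loop below breaks
        # early once start_i reaches nrows, so the +1 merely adds an empty
        # trailing chunk when nrows is an exact multiple of chunksize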
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
                for idx in self.index[::-1]:
                    cols.insert(0, self.table.c[idx])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns
and self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index.get_level_values(i))
column_names_and_types.append((idx_label, idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
pkc = PrimaryKeyConstraint(self.keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
        # At this point, attach to a new MetaData object; only attach to
        # self.meta once the table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
        Need to work around limited NA value support. Floats are always
        fine, but ints must become floats if there are null values.
        Booleans are hard because converting a bool column with None replaces
        all Nones with False. Therefore only convert bools if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._numpy_type(sql_col.type)
if col_type is datetime or col_type is date:
if not issubclass(df_col.dtype.type, np.datetime64):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notnull_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnulldata = col[~isnull(col)]
if len(notnulldata):
col_for_inference = notnulldata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
from sqlalchemy.types import (BigInteger, Float, Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64':
try:
tz = col.tzinfo
return DateTime(timezone=True)
except:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning)
return BigInteger
elif col_type == 'floating':
return Float
elif col_type == 'integer':
# TODO: Refine integer size.
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
def _numpy_type(self, sqltype):
from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date
if isinstance(sqltype, Float):
return float
if isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
if isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
if isinstance(sqltype, Date):
return date
if isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy engine"
" or connection+sql flavor")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy engine"
" or connection+sql flavor")
class SQLDatabase(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle database abstraction
Parameters
----------
engine : SQLAlchemy engine
Engine to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
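    Examples
    --------
    A minimal, illustrative sketch (requires SQLAlchemy; the in-memory URL
    and the table name are used for demonstration only):
    >>> from sqlalchemy import create_engine
    >>> engine = create_engine('sqlite:///:memory:')
    >>> db = SQLDatabase(engine)
    >>> db.has_table('data')
    False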
"""
def __init__(self, engine, schema=None, meta=None):
self.engine = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.engine, schema=schema)
self.meta = meta
def run_transaction(self):
return self.engine.begin()
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy engine"""
return self.engine.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
            Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
            Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
            E.g. psycopg2 uses %(name)s, so use params={'name' : 'value'}
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type.
"""
if dtype is not None:
import sqlalchemy.sql.type_api as type_api
for col, my_type in dtype.items():
if not issubclass(my_type, type_api.TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
        # check for potential case sensitivity issues (GH7815)
if name not in self.engine.table_names(schema=schema or self.meta.schema):
warnings.warn("The provided table name '{0}' is not found exactly "
"as such in the database after writing the table, "
"possibly due to case sensitivity issues. Consider "
"using lower case table names.".format(name), UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.engine.has_table(name, schema or self.meta.schema)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
return self.meta.tables.get('.'.join([schema, table_name]))
else:
return self.meta.tables.get(table_name)
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.engine.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# Flavor-specific SQL strings and handler class for access to DBs without
# SQLAlchemy installed
# SQL type conversions for each DB
_SQL_TYPES = {
'string': {
'mysql': 'VARCHAR (63)',
'sqlite': 'TEXT',
},
'floating': {
'mysql': 'FLOAT',
'sqlite': 'REAL',
},
'integer': {
'mysql': 'BIGINT',
'sqlite': 'INTEGER',
},
'datetime': {
'mysql': 'DATETIME',
'sqlite': 'TIMESTAMP',
},
'date': {
'mysql': 'DATE',
'sqlite': 'DATE',
},
'time': {
'mysql': 'TIME',
'sqlite': 'TIME',
},
'boolean': {
'mysql': 'BOOLEAN',
'sqlite': 'INTEGER',
}
}
# SQL quoting and wildcard symbols
_SQL_SYMB = {
'mysql': {
'br_l': '`',
'br_r': '`',
'wld': '%s'
},
'sqlite': {
'br_l': '[',
'br_r': ']',
'wld': '?'
}
}
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
    Instead of a table variable, just use the CREATE TABLE statement.
"""
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(str, self.frame.columns))
flv = self.pd_sql.flavor
        br_l = _SQL_SYMB[flv]['br_l']  # left identifier quote char
        br_r = _SQL_SYMB[flv]['br_r']  # right identifier quote char
wld = _SQL_SYMB[flv]['wld'] # wildcard char
if self.index is not None:
            for idx in self.index[::-1]:
                names.insert(0, idx)
bracketed_names = [br_l + column + br_r for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
self.name, col_names, wildcards)
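        # e.g. for the sqlite flavor this yields something like:
        #   INSERT INTO mytable ([index],[col_a],[col_b]) VALUES (?,?,?)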
return insert_statement
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
        Return a list of SQL statements that create a table reflecting the
        structure of a DataFrame. The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
        pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING)
flv = self.pd_sql.flavor
        br_l = _SQL_SYMB[flv]['br_l']  # left identifier quote char
        br_r = _SQL_SYMB[flv]['br_r']  # right identifier quote char
create_tbl_stmts = [(br_l + '%s' + br_r + ' %s') % (cname, col_type)
for cname, col_type, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
cnames_br = ",".join([br_l + c + br_r for c in self.keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + self.name + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([br_l + c + br_r for c in ix_cols])
create_stmts.append(
"CREATE INDEX ix_{tbl}_{cnames} ON {tbl} ({cnames_br})".format(
tbl=self.name, cnames=cnames, cnames_br=cnames_br))
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type][self.pd_sql.flavor]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
    For now it still supports the `flavor` argument to deal with the 'mysql' database
for backwards compatibility, but this will be removed in future versions.
Parameters
----------
con : sqlite connection object
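    Examples
    --------
    A minimal, illustrative sketch (normally this class is constructed via
    pandasSQL_builder rather than directly; the table name is made up):
    >>> import sqlite3
    >>> db = SQLiteDatabase(sqlite3.connect(':memory:'), flavor='sqlite')
    >>> db.has_table('data')
    False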
"""
def __init__(self, con, flavor, is_cursor=False):
self.is_cursor = is_cursor
self.con = con
if flavor is None:
flavor = 'sqlite'
if flavor not in ['sqlite', 'mysql']:
raise NotImplementedError
else:
self.flavor = flavor
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError("Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a string.
"""
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
flavor_map = {
'sqlite': ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name='%s';") % name,
'mysql': "SHOW TABLES LIKE '%s'" % name}
query = flavor_map.get(self.flavor)
return len(self.execute(query).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % name
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys)
return str(table.sql_schema())
def get_schema(frame, name, flavor='sqlite', keys=None, con=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
keys : string or sequence
        columns to use as primary key
con: an open SQL database connection object or an SQLAlchemy engine
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
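    Examples
    --------
    A minimal, illustrative sketch (the column and table names are made up
    for demonstration only):
    >>> from pandas import DataFrame
    >>> df = DataFrame({'a': [1, 2], 'b': ['x', 'y']})
    >>> schema = get_schema(df, 'data')  # a CREATE TABLE string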
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys)
# legacy names, with deprecation warnings and copied docs
@Appender(read_sql.__doc__, join='\n')
def read_frame(*args, **kwargs):
"""DEPRECATED - use read_sql
"""
warnings.warn("read_frame is deprecated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
@Appender(read_sql.__doc__, join='\n')
def frame_query(*args, **kwargs):
"""DEPRECATED - use read_sql
"""
warnings.warn("frame_query is deprecated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""DEPRECATED - use to_sql
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
con : DBAPI2 connection
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default False
Write DataFrame index as a column
Notes
-----
This function is deprecated in favor of ``to_sql``. There are however
two differences:
    - With ``to_sql`` the index is written to the sql database by default. To
      keep the behaviour of this function you need to specify ``index=False``.
- The new ``to_sql`` function supports sqlalchemy engines to work with
different sql flavors.
See also
--------
pandas.DataFrame.to_sql
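    Examples
    --------
    An illustrative migration sketch (an in-memory SQLite database and a
    made-up table name are used for demonstration only):
    >>> import sqlite3
    >>> from pandas import DataFrame
    >>> conn = sqlite3.connect(':memory:')
    >>> df = DataFrame({'a': [1, 2]})
    >>> # deprecated spelling: write_frame(df, 'data', conn)
    >>> to_sql(df, 'data', conn, index=False)  # preferred replacement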
"""
warnings.warn("write_frame is deprecated, use to_sql", FutureWarning)
# for backwards compatibility, set index=False when not specified
index = kwargs.pop('index', False)
return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,
index=index, **kwargs)
| mit |
DStauffman/dstauffman | dstauffman/tests/test_plotting_plotting.py | 1 | 22249 | r"""
Test file for the `plotting` module of the "dstauffman.plotting" library.
Notes
-----
#. Written by David C. Stauffer in March 2015.
"""
#%% Imports
import datetime
from typing import List, Optional
import unittest
from unittest.mock import patch
from dstauffman import capture_output, get_tests_dir, HAVE_MPL, HAVE_NUMPY, LogLevel, \
NP_DATETIME_FORM, NP_INT64_PER_SEC, NP_TIMEDELTA_FORM, unit
import dstauffman.plotting as plot
if HAVE_MPL:
import matplotlib.pyplot as plt
if HAVE_NUMPY:
import numpy as np
inf = np.inf
else:
from math import inf
#%% plotting.Opts
class Test_plotting_Opts(unittest.TestCase):
r"""
Tests the plotting.Opts class with the following cases:
normal mode
add new attribute to existing instance
"""
def setUp(self) -> None:
self.opts_fields = ['case_name']
def test_calling(self) -> None:
opts = plot.Opts()
for field in self.opts_fields:
self.assertTrue(hasattr(opts, field))
def test_new_attr(self) -> None:
opts = plot.Opts()
with self.assertRaises(AttributeError):
opts.new_field_that_does_not_exist = 1 # type: ignore[attr-defined]
def test_get_names_successful(self) -> None:
opts = plot.Opts()
opts.names = ['Name 1', 'Name 2']
name = opts.get_names(0)
self.assertEqual(name, 'Name 1')
def test_get_names_unsuccessful(self) -> None:
opts = plot.Opts()
opts.names = ['Name 1', 'Name 2']
name = opts.get_names(2)
self.assertEqual(name, '')
def test_get_date_zero_str(self) -> None:
opts = plot.Opts()
date_str = opts.get_date_zero_str()
self.assertEqual(date_str, '')
opts.date_zero = datetime.datetime(2019, 4, 1, 18, 0, 0)
date_str = opts.get_date_zero_str()
self.assertEqual(date_str,' t(0) = 01-Apr-2019 18:00:00 Z')
def get_time_limits(self) -> None:
opts = plot.Opts()
opts.disp_xmin = 60
opts.disp_xmax = inf
opts.rms_xmin = -inf
opts.rms_xmax = None
opts.time_base = 'sec'
opts.time_unit = 'min'
(d1, d2, r1, r2) = opts.get_time_limits()
self.assertEqual(d1, 1)
self.assertEqual(d2, inf)
self.assertEqual(r1, -inf)
self.assertIsNone(r2)
def get_time_limits2(self) -> None:
opts = plot.Opts().convert_dates('datetime')
opts.disp_xmin = datetime.datetime(2020, 6, 1, 0, 0, 0)
opts.disp_xmax = datetime.datetime(2020, 6, 1, 12, 0, 0)
(d1, d2, r1, r2) = opts.get_time_limits()
self.assertEqual(d1, datetime.datetime(2020, 6, 1, 0, 0, 0))
self.assertEqual(d2, datetime.datetime(2020, 6, 1, 12, 0, 0))
self.assertIsNone(r1)
self.assertIsNone(r2)
def test_pprint(self) -> None:
opts = plot.Opts()
with capture_output() as out:
opts.pprint(indent=2)
lines = out.getvalue().strip().split('\n')
out.close()
self.assertEqual(lines[0], 'Opts')
self.assertEqual(lines[1], ' case_name = ')
self.assertEqual(lines[3], ' save_plot = False')
self.assertEqual(lines[-1], ' names = []')
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
def test_convert_dates(self) -> None:
opts = plot.Opts()
self.assertEqual(opts.disp_xmin, -inf)
self.assertEqual(opts.time_base, 'sec')
opts.convert_dates('datetime')
self.assertIsNone(opts.disp_xmin)
self.assertEqual(opts.time_base, 'datetime')
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
def test_convert_dates2(self) -> None:
opts = plot.Opts(date_zero=datetime.datetime(2020, 6, 1))
opts.rms_xmin = -10
opts.rms_xmax = 10
opts.disp_xmin = 5
opts.disp_xmax = 150
opts.convert_dates('datetime')
self.assertEqual(opts.time_base, 'datetime')
self.assertEqual(opts.rms_xmin, datetime.datetime(2020, 5, 31, 23, 59, 50))
self.assertEqual(opts.rms_xmax, datetime.datetime(2020, 6, 1, 0, 0, 10))
self.assertEqual(opts.disp_xmin, datetime.datetime(2020, 6, 1, 0, 0, 5))
self.assertEqual(opts.disp_xmax, datetime.datetime(2020, 6, 1, 0, 2, 30))
#%% plotting.suppress_plots and plotting.unsupress_plots
class Test_plotting_Plotter(unittest.TestCase):
r"""
Tests the plotting.Plotter class with the following cases:
Suppress and Unsuppress
"""
@classmethod
def setUpClass(cls) -> None:
cls.orig_flag = plot.plotting._Plotter # type: ignore[attr-defined]
def test_suppress_and_unsupress(self) -> None:
plot.suppress_plots()
self.assertFalse(plot.plotting._Plotter)
plot.unsuppress_plots()
self.assertTrue(plot.plotting._Plotter)
def tearDown(self) -> None:
if self.orig_flag: # type: ignore[attr-defined]
plot.unsuppress_plots()
else:
plot.suppress_plots()
#%% plotting.plot_time_history
@unittest.skipIf(not HAVE_MPL, 'Skipping due to missing matplotlib dependency.')
class Test_plotting_plot_time_history(unittest.TestCase):
r"""
Tests the plotting.plot_time_history function with the following cases:
Nominal
Defaults
With label
With type
With Opts
With legend
No data
Ignore all zeros
Bad legend
Show zero
"""
def setUp(self) -> None:
self.description = 'Plot description'
self.time = np.arange(0, 10, 0.1) + 2000
num_channels = 5
self.row_data = np.random.rand(len(self.time), num_channels)
mag = np.sum(self.row_data, axis=1)
self.row_data = 10 * self.row_data / np.expand_dims(mag, axis=1)
self.col_data = self.row_data.T.copy()
self.units = 'percentage'
self.opts = plot.Opts()
self.opts.show_plot = False
self.elements = ['Value 1', 'Value 2', 'Value 3', 'Value 4', 'Value 5']
self.figs: List[plt.Figure] = []
def test_nominal(self) -> None:
self.figs.append(plot.plot_time_history(self.description, self.time, self.row_data, \
opts=self.opts, data_as_rows=False))
def test_defaults(self) -> None:
self.figs.append(plot.plot_time_history('', self.time, self.col_data))
def test_with_units(self) -> None:
self.figs.append(plot.plot_time_history(self.description, self.time, self.col_data, units=self.units))
def test_with_opts(self) -> None:
self.figs.append(plot.plot_time_history(self.description, self.time, self.col_data, opts=self.opts))
@patch('dstauffman.plotting.plotting.logger')
def test_no_data(self, mock_logger):
plot.plot_time_history('', self.time, None)
self.assertEqual(mock_logger.log.call_count, 1)
mock_logger.log.assert_called_with(LogLevel.L5, ' plot skipped due to missing data.')
def test_ignore_zeros(self) -> None:
self.figs.append(plot.plot_time_history(self.description, self.time, self.col_data, ignore_empties=True))
def test_ignore_zeros2(self) -> None:
self.col_data[1, :] = 0
self.col_data[3, :] = 0
self.figs.append(plot.plot_time_history(self.description, self.time, self.col_data, ignore_empties=True))
@patch('dstauffman.plotting.plotting.logger')
def test_ignore_zeros3(self, mock_logger):
self.col_data = np.zeros(self.col_data.shape)
not_a_fig = plot.plot_time_history('All Zeros', self.time, self.col_data, ignore_empties=True)
self.assertIs(not_a_fig, None)
self.assertEqual(mock_logger.log.call_count, 1)
mock_logger.log.assert_called_with(LogLevel.L5, ' All Zeros plot skipped due to missing data.')
def test_not_ndarray(self) -> None:
self.figs.append(plot.plot_time_history('Zero', 0, 0))
def test_0d(self) -> None:
self.figs.append(plot.plot_time_history('Zero', np.array(0), np.array(0)))
def test_1d(self) -> None:
self.figs.append(plot.plot_time_history('Line', np.arange(5), np.arange(5)))
def test_bad_3d(self) -> None:
bad_data = np.random.rand(self.time.shape[0], 4, 5)
with self.assertRaises(AssertionError):
plot.plot_time_history(self.description, self.time, bad_data, opts=self.opts)
def test_datetime(self) -> None:
dates = np.datetime64('2020-01-11 12:00:00') + np.arange(0, 1000, 10).astype('timedelta64[ms]')
self.figs.append(plot.plot_time_history(self.description, dates, self.col_data, opts=self.opts, \
time_units='numpy'))
def test_lists0(self) -> None:
time = np.arange(100, dtype=float)
data = [np.zeros(100), np.ones(100)]
self.figs.append(plot.plot_time_history('', time, data))
def test_lists1(self) -> None:
time = np.arange(10)
data = [np.random.rand(10), 5*np.random.rand(10)]
elements = ('Item 1', '5 Times')
self.figs.append(plot.plot_time_history(self.description, time, data, opts=self.opts, elements=elements))
def test_lists2(self) -> None:
time = [np.arange(5), np.arange(10, dtype=float)]
data = [np.array([0., 0.1, 0.2, 0.3, 0.5]), np.arange(10)]
self.figs.append(plot.plot_time_history(self.description, time, data, opts=self.opts))
def tearDown(self) -> None:
if self.figs:
for this_fig in self.figs:
plt.close(this_fig)
#%% plotting.plot_correlation_matrix
@unittest.skipIf(not HAVE_MPL, 'Skipping due to missing matplotlib dependency.')
class Test_plotting_plot_correlation_matrix(unittest.TestCase):
r"""
Tests the plotting.plot_correlation_matrix function with the following cases:
normal mode
non-square inputs
default labels
all arguments passed in
symmetric matrix
coloring with values above 1
coloring with values below -1
coloring with values in -1 to 1 instead of 0 to 1
x label rotation
bad labels (should raise error)
"""
def setUp(self) -> None:
num = 10
self.figs: List[plt.Figure] = []
self.data = unit(np.random.rand(num, num), axis=0)
self.labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
self.units = 'percentage'
self.opts = plot.Opts()
self.opts.case_name = 'Testing Correlation'
self.matrix_name = 'Not a Correlation Matrix'
self.sym = self.data.copy()
for j in range(num):
for i in range(num):
if i == j:
self.sym[i, j] = 1
elif i > j:
self.sym[i, j] = self.data[j, i]
def test_normal(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data, self.labels))
def test_nonsquare(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data[:5, :3], [self.labels[:3], \
self.labels[:5]]))
def test_default_labels(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data[:5, :3]))
def test_type(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data, units=self.units))
def test_all_args(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data, self.labels, self.units, opts=self.opts, \
matrix_name=self.matrix_name, cmin=0, cmax=1, xlabel='', ylabel='', \
plot_lower_only=False, label_values=True, x_lab_rot=180, colormap='Paired'))
def test_symmetric(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.sym))
def test_symmetric_all(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.sym, plot_lower_only=False))
def test_above_one(self) -> None:
large_data = self.data * 1000
self.figs.append(plot.plot_correlation_matrix(large_data, self.labels))
def test_above_one_part2(self) -> None:
large_data = self.data * 1000
self.figs.append(plot.plot_correlation_matrix(large_data, self.labels, cmax=2000))
def test_below_one(self) -> None:
large_data = 1000*(self.data - 0.5)
self.figs.append(plot.plot_correlation_matrix(large_data, self.labels))
def test_below_one_part2(self) -> None:
large_data = 1000*(self.data - 0.5)
self.figs.append(plot.plot_correlation_matrix(large_data, self.labels, cmin=-2))
def test_within_minus_one(self) -> None:
large_data = self.data - 0.5
self.figs.append(plot.plot_correlation_matrix(large_data, self.labels))
def test_within_minus_one_part2(self) -> None:
large_data = self.data - 0.5
self.figs.append(plot.plot_correlation_matrix(large_data, self.labels, cmin=-1, cmax=1))
def test_xlabel(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data, xlabel='Testing Label'))
def test_ylabel(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data, ylabel='Testing Label'))
def test_x_label_rotation(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data, self.labels, x_lab_rot=0))
def test_nans(self) -> None:
self.data[0, 0] = np.nan
self.figs.append(plot.plot_correlation_matrix(self.data, self.labels))
def test_bad_labels(self) -> None:
with self.assertRaises(ValueError):
self.figs.append(plot.plot_correlation_matrix(self.data, ['a']))
def test_label_values(self) -> None:
self.figs.append(plot.plot_correlation_matrix(self.data, label_values=True))
def tearDown(self) -> None:
for i in range(len(self.figs)):
plt.close(self.figs.pop())
#%% plotting.plot_bar_breakdown
@unittest.skipIf(not HAVE_MPL, 'Skipping due to missing matplotlib dependency.')
class Test_plotting_plot_bar_breakdown(unittest.TestCase):
r"""
Tests the plotting.plot_bar_breakdown function with the following cases:
Nominal
Defaults
With label
With opts
With legend
Null data
Bad legend
With Colormap
"""
def setUp(self) -> None:
self.time = np.arange(0, 5, 1./12) + 2000
num_bins = 5
self.data = np.random.rand(num_bins, len(self.time))
mag = np.sum(self.data, axis=0)
self.data = self.data / np.expand_dims(mag, axis=0)
self.description = 'Plot bar testing'
self.elements = ['Value 1', 'Value 2', 'Value 3', 'Value 4', 'Value 5']
self.opts = plot.Opts()
self.opts.show_plot = False
self.figs: List[plt.Figure] = []
def test_nominal(self) -> None:
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data, opts=self.opts, \
elements=self.elements))
def test_defaults(self) -> None:
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data))
def test_opts(self) -> None:
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data, opts=self.opts))
def test_elements(self) -> None:
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data, elements=self.elements))
def test_ignore_zeros(self) -> None:
self.data[:, 1] = 0
self.data[:, 3] = np.nan
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data, ignore_empties=True))
@patch('dstauffman.plotting.plotting.logger')
def test_null_data(self, mock_logger):
plot.plot_bar_breakdown('', self.time, None)
self.assertEqual(mock_logger.log.call_count, 1)
mock_logger.log.assert_called_with(LogLevel.L5, ' plot skipped due to missing data.')
def test_colormap(self) -> None:
self.opts.colormap = 'Dark2'
colormap = 'Paired'
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data, \
opts=self.opts, colormap=colormap))
def test_bad_elements(self) -> None:
with self.assertRaises(AssertionError):
plot.plot_bar_breakdown(self.description, self.time, self.data, elements=self.elements[:-1])
def test_single_point(self) -> None:
self.figs.append(plot.plot_bar_breakdown(self.description, self.time[:1], self.data[:, :1]))
def test_new_colormap(self) -> None:
self.opts.colormap = 'seismic'
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data, opts=self.opts))
def test_datetime(self) -> None:
dates = np.datetime64('2020-01-11 12:00:00') + np.arange(0, 7200, 120).astype('timedelta64[s]')
self.figs.append(plot.plot_bar_breakdown(self.description, dates, self.data, opts=self.opts, \
time_units='numpy'))
def test_data_as_rows(self) -> None:
self.figs.append(plot.plot_bar_breakdown(self.description, self.time, self.data.T.copy(), opts=self.opts, \
elements=self.elements, data_as_rows=False))
def tearDown(self) -> None:
if self.figs:
for this_fig in self.figs:
plt.close(this_fig)
#%% plotting.plot_histogram
@unittest.skipIf(not HAVE_MPL, 'Skipping due to missing matplotlib dependency.')
class Test_plotting_plot_histogram(unittest.TestCase):
r"""
    Tests the plotting.plot_histogram function with the following cases:
Nominal
All inputs
Datetimes
"""
def setUp(self) -> None:
self.description = 'Histogram'
self.data = np.array([0.5, 3.3, 1., 1.5, 1.5, 1.75, 2.5, 2.5])
self.bins = np.array([0., 1., 2., 3., 5., 7.])
self.fig: Optional[plt.Figure] = None
def test_nominal(self) -> None:
self.fig = plot.plot_histogram(self.description, self.data, self.bins)
def test_with_opts(self) -> None:
opts = plot.Opts()
self.fig = plot.plot_histogram(self.description, self.data, self.bins, opts=opts, color='xkcd:black', \
xlabel='Text', ylabel='Num', second_ylabel='Dist')
def test_datetimes(self) -> None:
date_zero = np.datetime64(datetime.date(2021, 2, 1)).astype(NP_DATETIME_FORM) # type: ignore[call-overload]
data_np = date_zero + np.round(NP_INT64_PER_SEC * self.data).astype(NP_TIMEDELTA_FORM)
bins_np = date_zero + np.round(NP_INT64_PER_SEC * self.bins).astype(NP_TIMEDELTA_FORM)
# TODO: would prefer to handle this case better
self.fig = plot.plot_histogram(self.description, data_np.astype(np.int64), bins_np.astype(np.int64))
def test_infs(self) -> None:
self.fig = plot.plot_histogram(self.description, self.data, np.array([-np.inf, -1., 0., 1., np.inf]))
def test_int_cats(self) -> None:
data = np.array([3, 3, 5, 8, 2, 2, 2])
bins = np.array([1, 2, 3, 4, 5])
self.fig = plot.plot_histogram(self.description, data, bins, use_exact_counts=True)
def test_string_cats(self) -> None:
data = np.full(10, 'yes', dtype='S8')
data[2] = 'no'
data[8] = 'no'
data[5] = 'unknown'
bins = [b'yes', b'no']
self.fig = plot.plot_histogram(self.description, data, bins, use_exact_counts=True)
def test_missing_data(self) -> None:
with self.assertRaises(ValueError):
plot.plot_histogram(self.description, self.data, np.array([3, 10, 15]))
def test_missing_exacts(self) -> None:
self.fig = plot.plot_histogram(self.description, np.array([1, 1, 1, 2, 3, 3, 3]), \
np.array([0, 3, 6]), use_exact_counts=True)
def tearDown(self) -> None:
if self.fig:
plt.close(self.fig)
#%% plotting.setup_plots
@unittest.skipIf(not HAVE_MPL, 'Skipping due to missing matplotlib dependency.')
class Test_plotting_setup_plots(unittest.TestCase):
r"""
Tests the plotting.setup_plots function with the following cases:
Prepend a title
Don't prepend a title
Don't show the plot
Multiple figures
Save the plot
Show the plot link
"""
def setUp(self) -> None:
self.fig = plt.figure()
self.fig.canvas.manager.set_window_title('Figure Title')
ax = self.fig.add_subplot(111)
x = np.arange(0, 10, 0.1)
y = np.sin(x)
ax.plot(x, y)
ax.set_title('X vs Y')
ax.set_xlabel('time [years]')
ax.set_ylabel('value [radians]')
self.opts = plot.Opts()
self.opts.case_name = 'Testing'
self.opts.show_plot = True
self.opts.save_plot = False
self.opts.save_path = get_tests_dir()
def test_title(self) -> None:
plot.setup_plots(self.fig, self.opts)
def test_no_title(self) -> None:
self.opts.case_name = ''
plot.setup_plots(self.fig, self.opts)
def test_not_showing_plot(self) -> None:
self.opts.show_plot = False
plot.setup_plots(self.fig, self.opts)
def test_multiple_figs(self) -> None:
fig_list = [self.fig]
(new_fig, ax) = plt.subplots()
ax.plot(0, 0)
fig_list.append(new_fig)
plot.setup_plots(fig_list, self.opts)
plt.close(new_fig)
def test_saving_plot(self) -> None:
this_filename = get_tests_dir().joinpath(self.opts.case_name + ' - Figure Title.png')
self.opts.save_plot = True
plot.setup_plots(self.fig, self.opts)
# remove file
this_filename.unlink(missing_ok=True)
def test_show_link(self) -> None:
this_filename = get_tests_dir().joinpath(self.opts.case_name + ' - Figure Title.png')
self.opts.save_plot = True
self.opts.show_link = True
with capture_output() as out:
plot.setup_plots(self.fig, self.opts)
output = out.getvalue().strip()
out.close()
# remove file
this_filename.unlink(missing_ok=True)
self.assertTrue(output.startswith('Plots saved to <a href="'))
def tearDown(self) -> None:
plt.close(self.fig)
#%% Unit test execution
if __name__ == '__main__':
plot.suppress_plots()
unittest.main(exit=False)
| lgpl-3.0 |
jaeilepp/eggie | mne/tests/test_source_estimate.py | 1 | 24908 | from __future__ import print_function
import os.path as op
from nose.tools import assert_true, assert_raises
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy.fftpack import fft
from mne.datasets import sample
from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
read_source_spaces)
from mne import read_source_estimate, morph_data, extract_label_time_course
from mne.source_estimate import (spatio_temporal_tris_connectivity,
spatio_temporal_src_connectivity,
compute_morph_matrix, grade_to_vertices)
from mne.minimum_norm import read_inverse_operator
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
requires_pytables)
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = sample.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-inv.fif')
fname_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-vol-7-fwd.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
tempdir = _TempDir()
@sample.requires_sample_data
def test_volume_stc():
"""Test volume STCs
"""
N = 100
data = np.arange(N)[:, np.newaxis]
datas = [data, data, np.arange(2)[:, np.newaxis]]
vertno = np.arange(N)
vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
vertno_reads = [vertno, vertno, np.arange(2)]
for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
stc = VolSourceEstimate(data, vertno, 0, 1)
fname_temp = op.join(tempdir, 'temp-vl.stc')
stc_new = stc
for _ in range(2):
stc_new.save(fname_temp)
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(vertno_read, stc_new.vertno)
assert_array_almost_equal(stc.data, stc_new.data)
# now let's actually read a MNE-C processed file
stc = read_source_estimate(fname_vol, 'sample')
assert_true(isinstance(stc, VolSourceEstimate))
assert_true('sample' in repr(stc))
stc_new = stc
assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
for _ in range(2):
fname_temp = op.join(tempdir, 'temp-vol.w')
stc_new.save(fname_temp, ftype='w')
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(stc.vertno, stc_new.vertno)
assert_array_almost_equal(stc.data, stc_new.data)
# save the stc as a nifti file and export
try:
import nibabel as nib
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
src = read_source_spaces(fname_vsrc)
vol_fname = op.join(tempdir, 'stc.nii.gz')
stc.save_as_volume(vol_fname, src,
dest='surf', mri_resolution=False)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
with warnings.catch_warnings(record=True): # nib<->numpy
t1_img = nib.load(fname_t1)
stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
dest='mri', mri_resolution=True)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
# export without saving
img = stc.as_volume(src, dest='mri', mri_resolution=True)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
except ImportError:
print('Save as nifti test skipped, needs NiBabel')
@sample.requires_sample_data
def test_expand():
"""Test stc expansion
"""
stc = read_source_estimate(fname, 'sample')
assert_true('sample' in repr(stc))
labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)
stc_limited = stc.in_label(labels_lh[0] + labels_lh[1])
stc_new = stc_limited.copy()
stc_new.data.fill(0)
for label in labels_lh[:2]:
stc_new += stc.in_label(label).expand(stc_limited.vertno)
# make sure we can't add unless vertno agree
assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10):
verts = [np.arange(10), np.arange(90)]
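    # 10 left-hemisphere + 90 right-hemisphere vertices, tmin=0 s, tstep=0.1 s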
return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
def test_io_stc():
"""Test IO for STC files
"""
stc = _fake_stc()
stc.save(op.join(tempdir, "tmp.stc"))
stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
assert_array_almost_equal(stc.data, stc2.data)
assert_array_almost_equal(stc.tmin, stc2.tmin)
assert_equal(len(stc.vertno), len(stc2.vertno))
for v1, v2 in zip(stc.vertno, stc2.vertno):
assert_array_almost_equal(v1, v2)
assert_array_almost_equal(stc.tstep, stc2.tstep)
@requires_pytables()
def test_io_stc_h5():
"""Test IO for STC files using HDF5
"""
stc = _fake_stc()
assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
out_name = op.join(tempdir, 'tmp')
stc.save(out_name, ftype='h5')
stc3 = read_source_estimate(out_name)
stc4 = read_source_estimate(out_name + '-stc.h5')
assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
for stc_new in stc3, stc4:
assert_equal(stc_new.subject, stc.subject)
assert_array_equal(stc_new.data, stc.data)
assert_array_equal(stc_new.tmin, stc.tmin)
assert_array_equal(stc_new.tstep, stc.tstep)
assert_equal(len(stc_new.vertno), len(stc.vertno))
for v1, v2 in zip(stc_new.vertno, stc.vertno):
assert_array_equal(v1, v2)
def test_io_w():
"""Test IO for w files
"""
stc = _fake_stc(n_time=1)
w_fname = op.join(tempdir, 'fake')
stc.save(w_fname, ftype='w')
src = read_source_estimate(w_fname)
src.save(op.join(tempdir, 'tmp'), ftype='w')
src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
assert_array_almost_equal(src.data, src2.data)
assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
"""Test arithmetic for STC files
"""
stc = _fake_stc()
data = stc.data.copy()
out = list()
for a in [data, stc]:
a = a + a * 3 + 3 * a - a ** 2 / 2
a += a
a -= a
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a /= 2 * a
a *= -a
a += 2
a -= 1
a *= -1
a /= 2
a **= 3
out.append(a)
assert_array_equal(out[0], out[1].data)
assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
stc_mean = stc.mean()
assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
@sample.requires_sample_data
def test_stc_methods():
"""Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
"""
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
stc = read_source_estimate(fname)
# lh_data / rh_data
assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
# bin
bin = stc.bin(.12)
a = np.array((1,), dtype=stc.data.dtype)
a[0] = np.mean(stc.data[0, stc.times < .12])
assert a[0] == bin.data[0, 0]
assert_raises(ValueError, stc.center_of_mass, 'sample')
stc.lh_data[:] = 0
vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
assert_true(hemi == 1)
# XXX Should design a fool-proof test case, but here were the results:
assert_true(vertex == 90186)
assert_true(np.round(t, 3) == 0.123)
stc = read_source_estimate(fname)
stc_new = deepcopy(stc)
o_sfreq = 1.0 / stc.tstep
# note that using no padding for this STC reduces edge ringing...
stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep / 2)
stc_new.resample(o_sfreq, npad=0)
assert_true(stc_new.data.shape[1] == stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep)
assert_array_almost_equal(stc_new.data, stc.data, 5)
@sample.requires_sample_data
def test_extract_label_time_course():
"""Test extraction of label time courses from stc
"""
n_stcs = 3
n_times = 50
src = read_inverse_operator(fname_inv)['src']
vertices = [src[0]['vertno'], src[1]['vertno']]
n_verts = len(vertices[0]) + len(vertices[1])
# get some labels
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
labels_rh = read_labels_from_annot('sample', hemi='rh',
subjects_dir=subjects_dir)
labels = list()
labels.extend(labels_lh[:5])
labels.extend(labels_rh[:4])
n_labels = len(labels)
label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
# compute the mean with sign flip
label_means_flipped = np.zeros_like(label_means)
for i, label in enumerate(labels):
label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
# generate some stc's with known data
stcs = list()
for i in range(n_stcs):
data = np.zeros((n_verts, n_times))
# set the value of the stc within each label
for j, label in enumerate(labels):
if label.hemi == 'lh':
idx = np.intersect1d(vertices[0], label.vertices)
idx = np.searchsorted(vertices[0], idx)
elif label.hemi == 'rh':
idx = np.intersect1d(vertices[1], label.vertices)
idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
data[idx] = label_means[j]
this_stc = SourceEstimate(data, vertices, 0, 1)
stcs.append(this_stc)
# test some invalid inputs
assert_raises(ValueError, extract_label_time_course, stcs, labels,
src, mode='notamode')
# have an empty label
empty_label = labels[0].copy()
empty_label.vertices += 1000000
assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
src, mode='mean')
# but this works:
tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
allow_empty=True)
for arr in tc:
assert_true(arr.shape == (1, n_times))
assert_array_equal(arr, np.zeros((1, n_times)))
# test the different modes
modes = ['mean', 'mean_flip', 'pca_flip', 'max']
for mode in modes:
label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
label_tc_method = [stc.extract_label_time_course(labels, src,
mode=mode) for stc in stcs]
assert_true(len(label_tc) == n_stcs)
assert_true(len(label_tc_method) == n_stcs)
for tc1, tc2 in zip(label_tc, label_tc_method):
assert_true(tc1.shape == (n_labels, n_times))
assert_true(tc2.shape == (n_labels, n_times))
assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
if mode == 'mean':
assert_array_almost_equal(tc1, label_means)
if mode == 'mean_flip':
assert_array_almost_equal(tc1, label_means_flipped)
if mode == 'max':
assert_array_almost_equal(tc1, label_maxs)
# test label with very few vertices (check SVD conditionals)
label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
x = label_sign_flip(label, src)
assert_true(len(x) == 2)
label = Label(vertices=[], hemi='lh')
x = label_sign_flip(label, src)
assert_true(x.size == 0)
@sample.requires_sample_data
def test_morph_data():
"""Test morphing of data
"""
subject_from = 'sample'
subject_to = 'fsaverage'
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
stc_from = read_source_estimate(fname, subject='sample')
fname = op.join(data_path, 'MEG', 'sample', 'fsaverage_audvis-meg')
stc_to = read_source_estimate(fname)
# make sure we can specify grade
stc_from.crop(0.09, 0.1) # for faster computation
stc_to.crop(0.09, 0.1) # for faster computation
stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
# make sure we can specify vertices
vertices_to = grade_to_vertices(subject_to, grade=3,
subjects_dir=subjects_dir)
stc_to2 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
# make sure we can use different buffer_size
stc_to3 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=3,
subjects_dir=subjects_dir)
assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
assert_array_almost_equal(stc_to1.data, stc_to2.data)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
# make sure precomputed morph matrices work
morph_mat = compute_morph_matrix(subject_from, subject_to,
stc_from.vertno, vertices_to,
smooth=12, subjects_dir=subjects_dir)
stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to1.data.mean(axis=0)
assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
# make sure we can fill by morphing
stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
smooth=12, buffer_size=3, subjects_dir=subjects_dir)
assert_true(stc_to5.data.shape[0] == 163842 + 163842)
# test morphing to the same subject
stc_to6 = stc_from.morph(subject_from, grade=stc_from.vertno, smooth=1,
subjects_dir=subjects_dir)
mask = np.ones(stc_from.data.shape[0], dtype=np.bool)
# XXX: there is a bug somewhere that causes a difference at 2 vertices..
mask[6799] = False
mask[6800] = False
assert_array_almost_equal(stc_from.data[mask], stc_to6.data[mask], 5)
# Morph sparse data
# Make a sparse stc
stc_from.vertno[0] = stc_from.vertno[0][[100, 500]]
stc_from.vertno[1] = stc_from.vertno[1][[200]]
stc_from._data = stc_from._data[:3]
assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
grade=5, subjects_dir=subjects_dir)
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
stc_from.vertno[0] = np.array([], dtype=np.int64)
stc_from._data = stc_from._data[:1]
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result"""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
def test_transform_data():
"""Test applying linear (time) transform to data"""
# make up some data
n_sensors, n_vertices, n_times = 10, 20, 4
kernel = np.random.randn(n_vertices, n_sensors)
sens_data = np.random.randn(n_sensors, n_times)
vertices = np.arange(n_vertices)
data = np.dot(kernel, sens_data)
for idx, tmin_idx, tmax_idx in\
zip([None, np.arange(n_vertices // 2, n_vertices)],
[None, 1], [None, 3]):
if idx is None:
idx_use = slice(None, None)
else:
idx_use = idx
data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
for stc_data in (data, (kernel, sens_data)):
stc = VolSourceEstimate(stc_data, vertices=vertices,
tmin=0., tstep=1.)
stc_data_t = stc.transform_data(_my_trans, idx=idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
assert_allclose(data_f, stc_data_t)
def test_transform():
"""Test applying linear (time) transform to data"""
# make up some data
n_verts_lh, n_verts_rh, n_times = 10, 10, 10
vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
# data_t.ndim > 2 & copy is True
stcs_t = stc.transform(_my_trans, copy=True)
assert_true(isinstance(stcs_t, list))
assert_array_equal(stc.times, stcs_t[0].times)
assert_equal(stc.vertno, stcs_t[0].vertno)
data = np.concatenate((stcs_t[0].data[:, :, None],
stcs_t[1].data[:, :, None]), axis=2)
data_t = stc.transform_data(_my_trans)
assert_array_equal(data, data_t) # check against stc.transform_data()
# data_t.ndim > 2 & copy is False
assert_raises(ValueError, stc.transform, _my_trans, copy=False)
# data_t.ndim = 2 & copy is True
tmp = deepcopy(stc)
stc_t = stc.transform(np.abs, copy=True)
assert_true(isinstance(stc_t, SourceEstimate))
assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original?
# data_t.ndim = 2 & copy is False
times = np.round(1000 * stc.times)
verts = np.arange(len(stc.lh_vertno),
len(stc.lh_vertno) + len(stc.rh_vertno), 1)
verts_rh = stc.rh_vertno
t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
tmax_idx=t_idx[-1])
stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
assert_true(isinstance(stc, SourceEstimate))
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
assert_true(len(stc.vertno[0]) == 0)
assert_equal(stc.vertno[1], verts_rh)
assert_array_equal(stc.data, data_t)
times = np.round(1000 * stc.times)
t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
stc.transform(np.abs, tmin=0, tmax=250, copy=False)
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_connectivity():
"""Test spatio-temporal connectivity from triangles"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
connectivity = spatio_temporal_tris_connectivity(tris, 2)
x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
components = stats.cluster_level._get_components(np.array(x), connectivity)
# _get_components works differently now...
old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
new_fmt = np.array(old_fmt)
new_fmt = [np.nonzero(new_fmt == v)[0]
for v in np.unique(new_fmt[new_fmt >= 0])]
assert_true(len(new_fmt), len(components))
for c, n in zip(components, new_fmt):
assert_array_equal(c, n)
@sample.requires_sample_data
def test_spatio_temporal_src_connectivity():
"""Test spatio-temporal connectivity from source spaces"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
src = [dict(), dict()]
connectivity = spatio_temporal_tris_connectivity(tris, 2)
src[0]['use_tris'] = np.array([[0, 1, 2]])
src[1]['use_tris'] = np.array([[0, 1, 2]])
src[0]['vertno'] = np.array([0, 1, 2])
src[1]['vertno'] = np.array([0, 1, 2])
connectivity2 = spatio_temporal_src_connectivity(src, 2)
assert_array_equal(connectivity.todense(), connectivity2.todense())
# add test for dist connectivity
src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
src[0]['vertno'] = [0, 1, 2]
src[1]['vertno'] = [0, 1, 2]
connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
assert_array_equal(connectivity.todense(), connectivity3.todense())
# add test for source space connectivity with omitted vertices
inverse_operator = read_inverse_operator(fname_inv)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src_ = inverse_operator['src']
connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
assert len(w) == 1
a = connectivity.shape[0] / 2
b = sum([s['nuse'] for s in inverse_operator['src']])
assert_true(a == b)
@requires_pandas
def test_as_data_frame():
"""Test stc Pandas exporter"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = np.random.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for stc in [stc_surf, stc_vol]:
assert_raises(ValueError, stc.as_data_frame, index=['foo', 'bar'])
for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
df = stc.as_data_frame(index=ind)
            assert_true(df.index.names == (ind if isinstance(ind, list)
                                           else [ind]))
assert_array_equal(df.values.T[ncat:], stc.data)
            # test that non-indexed data are present as categorical variables
with warnings.catch_warnings(record=True): # pandas
df.reset_index().columns[:3] == ['subject', 'time']
def test_get_peak():
"""Test peak getter
"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = np.random.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for ii, stc in enumerate([stc_surf, stc_vol]):
assert_raises(ValueError, stc.get_peak, tmin=-100)
assert_raises(ValueError, stc.get_peak, tmax=90)
assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
vert_idx, time_idx = stc.get_peak()
vertno = np.concatenate(stc.vertno) if ii == 0 else stc.vertno
assert_true(vert_idx in vertno)
assert_true(time_idx in stc.times)
ch_idx, time_idx = stc.get_peak(vert_as_index=True,
time_as_index=True)
assert_true(vert_idx < stc.data.shape[0])
assert_true(time_idx < len(stc.times))
| bsd-2-clause |
r-mart/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away most of the data, to put ourselves in a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/IPython/core/pylabtools.py | 2 | 14139 | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'agg': 'agg',
'inline': 'module://ipykernel.pylab.backend_inline',
'ipympl': 'module://ipympl.backend_nbagg',
}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://ipykernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
    but the function continues processing further figures.
Parameters
----------
figs : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
    This is just an easy-to-remember convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
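        # 'retina' renders an ordinary PNG at double DPI; retina_figure() then
        # halves the reported pixel size so the image displays at normal scale.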
# build keyword args
kw = {
"format":fmt,
"facecolor":fig.get_facecolor(),
"edgecolor":fig.get_edgecolor(),
"dpi":dpi,
"bbox_inches":bbox_inches,
}
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = {"width": w//2, "height":h//2}
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pyplot as plt
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if plt.draw_if_interactive.called:
plt.draw()
plt.draw_if_interactive.called = False
# re-draw everything that is stale
try:
da = plt.draw_all
except AttributeError:
pass
else:
da()
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
    ----------
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, str):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
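    # drop any Figure formatters registered by a previous call so that only the
    # formats requested here end up enabled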
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
mplbackend = matplotlib.get_backend().lower()
if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline','agg').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline','agg').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
if gui == 'agg':
gui = None
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
        # If a GUI is already active, only that GUI and 'inline' are allowed,
        # so fall back to the previously selected GUI.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pyplot as plt
plt.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from ipykernel.pylab.backend_inline import InlineBackend
except ImportError:
return
import matplotlib
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from ipykernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten
shell._saved_rcParams = {}
for k in cfg.rc:
shell._saved_rcParams[k] = matplotlib.rcParams[k]
# load inline_rc
matplotlib.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from ipykernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
matplotlib.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
    # only enable the formats once -> don't change the enabled formats (which the
    # user may have changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/ipykernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name
| mit |
hunse/deepnet | deepnet/autoencoder/algorithms.py | 1 | 8602 |
import collections
import time
import numpy as np
import numpy.random
import scipy as sp
import scipy.optimize
import theano
import theano.tensor as T
from .. import image_tools as imtools
# def sgd_minibatch_fn(trainer, rate, clip=None):
# x = T.matrix('x', dtype=trainer.dtype)
# cost, ups = trainer.get_cost_updates(x)
# grads = trainer.grads(cost)
# updates = collections.OrderedDict(ups)
# rate = T.cast(rate, dtype=trainer.dtype)
# for param, grad in zip(trainer.params, grads):
# if clip is not None:
# grad = grad.clip(*clip)
# updates[param] = param - rate*grad
# return theano.function([x], cost, updates=updates,
# allow_input_downcast=True)
def sgd_minibatch_fn(trainer, rate, clip=None):
x = T.matrix('x', dtype=trainer.dtype)
cost, grads, updates = trainer.get_cost_grads_updates(x)
rate = T.cast(rate, dtype=trainer.dtype)
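    # Plain stochastic gradient descent: each parameter is updated as
    # param <- param - rate * grad, with optional elementwise gradient clipping.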
for param in trainer.params:
grad = grads[param]
if clip is not None:
grad = grad.clip(*clip)
updates[param] = param - rate*grad
return theano.function([x], cost, updates=updates.items(),
allow_input_downcast=True)
def sgd(trainer, images, timages=None, test_fn=None,
n_epochs=30, rate=0.05, clip=(-1,1),
show=imtools.display_available(), vlims=None,
save_fn=None):
"""
    Unsupervised training using Stochastic Gradient Descent (SGD)
"""
if timages is None:
timages = images[:500]
print "Performing SGD on a %d x %d autoencoder for %d epochs"\
% (trainer.network.nvis, trainer.network.nhid, n_epochs)
print "SGD params: %s" % dict(n_epochs=n_epochs, rate=rate, clip=clip)
print "Trainer params: %s" % trainer.train_hypers
### create minibatch learning function
train = sgd_minibatch_fn(trainer, rate=rate, clip=clip)
exshape = images.shape[:1]
imshape = images.shape[1:]
# if len(exshape) == 1:
# ### split into batches
# batchlen = 100
# batches = images.reshape((-1, batchlen, np.prod(imshape)))
# elif len(exshape) == 2:
# ### already in batches, so just collapse the shape
# batches = images.reshape(exshape + (np.prod(imshape),))
# else:
# raise ValueError("Invalid input image shape %s" % images.shape)
### split into batches
batchlen = 100
batches = images.reshape((-1, batchlen, np.prod(imshape)))
stats = dict(algorithm='sgd', n_epochs=0, cost=[],
hypers=dict(trainer.train_hypers))
trainer.network.train_stats.append(stats)
for epoch in xrange(n_epochs):
# rate = rates[epoch] if epoch < len(rates) else rates[-1]
cost = 0
t = time.time()
for batch in batches:
cost += train(batch)
t = time.time() - t
test_stats = test(trainer, timages,
test_fn=test_fn, show=show, fignum=101, vlims=vlims)
stats['n_epochs'] += 1
stats['cost'].append(cost)
for k, v in test_stats.items():
if k not in stats: stats[k] = []
stats[k].append(v)
print "Epoch %d finished, t = %0.2f s, cost = %0.3e, %s" \
% (epoch, t, cost, str(["%s = %0.2e" % (k,v) for k,v in test_stats.items()]))
if save_fn is not None:
save_fn()
# trainer.network.to_file(save_name)
return stats
def lbfgs(trainer, images, timages=None, test_fn=None,
n_evals=10, clip=None,
show=imtools.display_available(), vlims=None,
save_fn=None):
"""
Unsupervised training using limited-memory BFGS (L-BFGS)
"""
batchlen = 5000
if timages is None:
timages = images[:500]
print "Performing L-BFGS on a %d x %d autoencoder for %d function evals"\
% (trainer.network.nvis, trainer.network.nhid, n_evals)
print "L-BFGS params: %s" % dict(n_evals=n_evals, clip=clip)
print "Trainer params: %s" % trainer.train_hypers
test_stats = test(trainer, timages, test_fn=test_fn,
show=show, fignum=101, vlims=vlims)
print ["%s = %0.3e" % (k,v) for k,v in test_stats.items()]
### Make functions to put parameters into one vector, and get them back.
params = [p.get_value(borrow=False) for p in trainer.params]
def split_params(params_vect):
params_list = []
i = 0
for p in params:
params_list.append(params_vect[i : i + p.size].reshape(p.shape))
i += p.size
return params_list
def concat_params(params_list):
return np.hstack([p.flatten() for p in params_list])
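    # L-BFGS-B optimizes a single flat parameter vector, so all network
    # parameters are packed into one array here and unpacked again in f_df_cast.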
p0 = concat_params(params).astype('float64')
### make Theano function
s_p = T.vector(dtype=trainer.dtype)
s_params = split_params(s_p)
s_x = T.matrix('x', dtype=trainer.dtype)
cost, grads, updates = trainer.get_cost_grads_updates(s_x)
grads = [grads[p] for p in trainer.params]
f_df = theano.function([s_x, s_p], [cost] + grads, updates=updates.items(),
givens=zip(trainer.params, s_params),
allow_input_downcast=True)
### make optimization function
stats = dict(algorithm='lbfgs', n_evals=0, cost=[],
hypers=dict(trainer.train_hypers))
trainer.network.train_stats.append(stats)
# flatten images, and get indices
images = images.reshape((images.shape[0], np.prod(images.shape[1:])))
images_i = np.arange(len(images))
def f_df_cast(p):
t = time.time()
i = np.random.choice(images_i, size=batchlen)
x = images[i]
outs = f_df(x, p)
cost, grads = outs[0], outs[1:]
grad = concat_params(grads)
if clip is not None:
grad = grad.clip(*clip)
t = time.time() - t
### test
if 1:
for param, value in zip(trainer.params, split_params(p)):
param.set_value(value.astype(param.dtype), borrow=False)
test_stats = test(trainer, timages, test_fn=test_fn,
show=show, fignum=101, vlims=vlims)
stats['n_evals'] += 1
stats['cost'].append(cost)
for k, v in test_stats.items():
if k not in stats: stats[k] = []
stats[k].append(v)
print "Eval %d finished, t = %0.2f s, cost = %0.3e, %s" \
% (stats['n_evals'], t, cost,
str(["%s = %0.2e" % (k,v) for k,v in test_stats.items()]))
return cost.astype('float64'), grad.astype('float64')
### perform optimization
t = time.time()
p_opt, mincost, info_dct = sp.optimize.lbfgsb.fmin_l_bfgs_b(
f_df_cast, p0, maxfun=n_evals, iprint=1)
t = time.time() - t
for param, opt_value in zip(trainer.params, split_params(p_opt)):
param.set_value(opt_value.astype(param.dtype), borrow=False)
print "Done. t = %0.3f s, cost = %0.2e" % (t, mincost)
test_stats = test(trainer, timages, test_fn=test_fn,
show=show, fignum=101, vlims=vlims)
print ["%s = %0.3e" % (k,v) for k,v in test_stats.items()]
def test(trainer, timages, test_fn=None,
show=imtools.display_available(), fignum=None, vlims=None):
# from ..image_tools import *
# from ..image_tools import compare, activations, filters
from .. import image_tools as imtools
if test_fn is None:
test_fn = trainer.network.compVHVraw
imshape = timages.shape[1:]
ims_shape = (-1,) + imshape
x = timages.reshape((len(timages), -1))
ha, h, ya, y = test_fn(x)
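    # ha/ya are the hidden/output pre-activations, h/y the corresponding
    # activations; y is the reconstruction compared against the input x below.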
rmse = imtools.rmse(x, y).mean()
act = h.mean(axis=0)
# test_stats = {'rmse': rmse, 'hidact': act}
test_stats = collections.OrderedDict(
rmse=rmse, hid_mean=act.mean(), hid_min=act.min(), hid_max=act.max())
### Show current results
if show:
import matplotlib.pyplot as plt
# image_tools.figure(fignum=fignum, figsize=(12,12))
plt.figure(fignum)
plt.clf()
rows, cols = 3, 2
ax = plt.subplot(rows, 1, 1)
imtools.compare([x.reshape(ims_shape), y.reshape(ims_shape)], vlims=vlims)
ax = plt.subplot(rows, cols, cols+1)
imtools.activations(ha, trainer.network.f.eval)
ax = plt.subplot(rows, cols, cols+2)
imtools.activations(ya, trainer.network.g.eval)
ax = plt.subplot(rows, 1, 3)
imtools.filters(trainer.network.filters, rows=8, cols=16)
plt.tight_layout()
plt.draw()
return test_stats
| mit |
walterreade/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
NelisVerhoef/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probabilities using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
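# For reference, the Brier score reported below is simply the (weighted) mean
# squared difference between the predicted probability and the binary outcome.
# This tiny helper is an illustrative sketch only -- the example itself keeps
# using sklearn.metrics.brier_score_loss.
def brier_score_sketch(y_true, y_prob, sample_weight=None):
    """Mean squared error between predicted probabilities and 0/1 outcomes."""
    return np.average((y_prob - y_true) ** 2, weights=sample_weight)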
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.21/_downloads/83f7eda14270eb609ba6c9fc83ca49f5/plot_muscle_detection.py | 18 | 3308 | """
===========================
Annotate muscle artifacts
===========================
Muscle contractions produce high frequency activity that can mask brain signal
of interest. Muscle artifacts can be produced when clenching the jaw,
swallowing, or twitching a cranial muscle. Muscle artifacts are most
noticeable in the range of 110-140 Hz.
This example uses :func:`~mne.preprocessing.annotate_muscle_zscore` to annotate
segments where muscle activity is likely present. This is done by band-pass
filtering the data in the 110-140 Hz range. Then, the envelope is taken using
the Hilbert analytic signal to consider only the absolute amplitude and not
the phase of the high-frequency signal. The envelope is z-scored and summed
across channels and divided by the square root of the number of channels.
Because muscle artifacts last several hundred milliseconds, a low-pass filter
is applied on the averaged z-scores at 4 Hz, to remove transient peaks.
Segments above a set threshold are annotated as ``BAD_muscle``. In addition,
the ``min_length_good`` parameter determines the cutoff for whether short
spans of "good data" in between muscle artifacts are included in the
surrounding "BAD" annotation.
"""
# Authors: Adonay Nunes <[email protected]>
# Luke Bloy <[email protected]>
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
import numpy as np
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
from mne.preprocessing import annotate_muscle_zscore
# Load data
data_path = bst_auditory.data_path()
raw_fname = op.join(data_path, 'MEG', 'bst_auditory', 'S01_AEF_20131218_01.ds')
raw = read_raw_ctf(raw_fname, preload=False)
raw.crop(130, 160).load_data() # just use a fraction of data for speed here
raw.resample(300, npad="auto")
###############################################################################
# Notch filter the data:
#
# .. note::
# If line noise is present, you should perform notch-filtering *before*
# detecting muscle artifacts. See :ref:`tut-section-line-noise` for an
# example.
raw.notch_filter([50, 100])
###############################################################################
# The threshold is data dependent, check the optimal threshold by plotting
# ``scores_muscle``.
threshold_muscle = 5 # z-score
# Choose one channel type, if there are axial gradiometers and magnetometers,
# select magnetometers as they are more sensitive to muscle activity.
annot_muscle, scores_muscle = annotate_muscle_zscore(
raw, ch_type="mag", threshold=threshold_muscle, min_length_good=0.2,
filter_freq=[110, 140])
###############################################################################
# Plot muscle z-scores across recording
# --------------------------------------------------------------------------
fig, ax = plt.subplots()
ax.plot(raw.times, scores_muscle)
ax.axhline(y=threshold_muscle, color='r')
ax.set(xlabel='time, (s)', ylabel='zscore', title='Muscle activity')
###############################################################################
# View the annotations
# --------------------------------------------------------------------------
order = np.arange(144, 164)
raw.set_annotations(annot_muscle)
raw.plot(start=5, duration=20, order=order)
| bsd-3-clause |
juswilliams/bioscripts | TE_count/TEcount_modules.py | 1 | 5308 | #!/usr/bin/env python
'''
module for TE_count script
'''
import Bio.SeqUtils
import gffutils
import argparse
import os
import sys
import re
import pandas as pd
import numpy as np
from ggplot import *
# Retrieve args from command line, usage help
def parseArguments():
# Create argument parser
parser = argparse.ArgumentParser()
# Positional mandatory arguments
parser.add_argument("FastaFile", help="Fasta File.", type=str)
# Optional arguments
parser.add_argument("-s", help="Step Size.", type=int, default=250)
parser.add_argument("-w", help="Window Size.", type=int, default=1000)
parser.add_argument("-r", help="Run RepeatMasker.",
type=bool, default=False)
parser.add_argument("-gff", help="Matching Repeatmasker GFF File.",
type=str, default="empty.gff")
# Parse arguments
args = parser.parse_args()
return args
# Class contains TE by window, sequence length
class TESeqRec:
def __init__(self, SeqId, SeqLength, WinSize, Step):
self.SeqId = SeqId
self.SeqLength = SeqLength
self.WinSize = WinSize
self.Step = Step
# Pre-Determine the number of TE ranges
NumOfChunks = ((self.SeqLength-self.WinSize)/self.Step)+1
self.NumOfChunks = NumOfChunks
# Create Ranges formatted for GFFutils
def get_tes(self, GffDb):
Region = ""
BpBin = []
SeqIdList = []
TEcounts = []
ForGGplot = []
for i in range(0, self.NumOfChunks*self.Step, self.Step):
WinStart = i
WinStop = i + self.WinSize
Region = str(self.SeqId) + ":" + str(WinStart) + "-" + str(WinStop)
TEhits = GffDb.region(strand=None, region=Region,
completely_within=False)
hits = len(list(TEhits))
SeqIdList.append(self.SeqId)
BpBin.append(WinStop)
TEcounts.append(hits)
ForGGplot.append(SeqIdList)
ForGGplot.append(BpBin)
ForGGplot.append(TEcounts)
return ForGGplot
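# Hedged usage sketch (not part of the original module; file names are
# placeholders): build a gffutils database from a RepeatMasker GFF and count
# TE hits per window for one sequence with TESeqRec.
# db = gffutils.create_db("repeats.gff", dbfn=":memory:", force=True,
#                         keep_order=True)
# rec = TESeqRec(SeqId="scaffold_1", SeqLength=50000, WinSize=1000, Step=250)
# seqids, window_ends, te_counts = rec.get_tes(db)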
# GFF check (ensure RepeatMasker format)
def gff_check(GffFile):
cond1 = False
cond2 = False
cond3 = False
with open(GffFile, "r") as GFF:
for line in GFF:
SplitLine = line.split("\t")
if len(SplitLine) == 9:
cond1 = True
if SplitLine[2] == "similarity":
cond2 = True
if "Motif:" in SplitLine[8]:
cond3 = True
break
if cond1 and cond2 and cond3:
return True
else:
print "This RepeatMasker GFF file may not be compatible\n"
print ("Consider replacing the last (9th) column, or renaming "
+ "the 3rd column to 'similarity'")
while True:
Reply = raw_input('Continue Anyway? (Y/N)... ')
if Reply == 'Y':
return True
elif Reply == 'N':
sys.exit('Exiting script...')
else:
print 'Incorrect input. Use Y/N'
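# For reference, a RepeatMasker GFF line that passes the three checks above is
# tab-separated with 9 columns, uses "similarity" as the feature type, and
# carries a Target "Motif:..." attribute in column 9, e.g. (illustrative):
# scaffold_1  RepeatMasker  similarity  1234  1456  21.5  +  .  Target "Motif:L1MC4" 11 233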
# GFF edit repeatmasker
def edit_gff(GffFile, tempfile):
try:
pattern = re.compile(r'\"Motif\:.+\"')
with open(GffFile, "r") as GFF:
with open(tempfile, "w") as tempGFF:
for line in GFF:
SplitLine = line.split("\t")
if len(SplitLine) < 9:
tempGFF.write(str(line))
if len(SplitLine) == 9:
result = pattern.search(str(SplitLine[8]))
if result:
string = result.group(0)
newstr = string.replace("\"", "")
newstr = re.sub('Motif:', 'Motif=', newstr)
del SplitLine[-1]
SplitLine.append(newstr)
tempGFF.write("\t".join(SplitLine) + "\n")
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('error formatting GFF file...')
# Ensure WinSize, SeqRecord, and Step are correct lengths
def arg_seqcheck(SeqRecord, WinSize, Step):
if not ((type(WinSize) == type(0)) and (type(Step) == type(0))):
raise Exception("**NOTE type(WinSize) and type(Step) must be int.")
if Step > WinSize:
raise Exception("**NOTE Step must not be larger than WinSize.")
if WinSize > len(SeqRecord.seq):
raise Exception('''**NOTE WinSize must not
be larger than Sequence length.''')
# Generate GGplot of TE density
def gen_fig(SeqId, ForGGplot):
OutFile = str(SeqId) + ".png"
try:
TEdf = pd.DataFrame({
'SeqId': ForGGplot[0],
'xval': ForGGplot[1],
'yval': ForGGplot[2]})
p = ggplot(aes(x='xval', y='yval'), data=TEdf) \
+ geom_line() \
+ ylab("RepeatMasker TE Density") \
+ xlab("Position (bp) in Scaffold " + str(SeqId)) \
+ ggtitle(str(SeqId)
+ " Transposable Element Density as predicted by RepeatMasker")
p.save(OutFile)
except IndexError as e:
print 'Error: '+ str(e)
sys.exit('Exiting script...')
| gpl-3.0 |
yyjiang/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
ElDeveloper/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) is not necessarily classified
into the region it appears to lie in, since it does not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along the first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
mahak/spark | python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_rolling.py | 15 | 3500 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class OpsOnDiffFramesGroupByRollingTest(PandasOnSparkTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
def _test_groupby_rolling_func(self, f):
pser = pd.Series([1, 2, 3], name="a")
pkey = pd.Series([1, 2, 3], name="a")
psser = ps.from_pandas(pser)
kkey = ps.from_pandas(pkey)
self.assert_eq(
getattr(psser.groupby(kkey).rolling(2), f)().sort_index(),
getattr(pser.groupby(pkey).rolling(2), f)().sort_index(),
)
pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
pkey = pd.Series([1, 2, 3, 2], name="a")
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
getattr(psdf.groupby(kkey).rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey).rolling(2), f)().sort_index(),
)
self.assert_eq(
getattr(psdf.groupby(kkey)["b"].rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey)["b"].rolling(2), f)().sort_index(),
)
self.assert_eq(
getattr(psdf.groupby(kkey)[["b"]].rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey)[["b"]].rolling(2), f)().sort_index(),
)
def test_groupby_rolling_count(self):
self._test_groupby_rolling_func("count")
def test_groupby_rolling_min(self):
self._test_groupby_rolling_func("min")
def test_groupby_rolling_max(self):
self._test_groupby_rolling_func("max")
def test_groupby_rolling_mean(self):
self._test_groupby_rolling_func("mean")
def test_groupby_rolling_sum(self):
self._test_groupby_rolling_func("sum")
def test_groupby_rolling_std(self):
# TODO: `std` now raise error in pandas 1.0.0
self._test_groupby_rolling_func("std")
def test_groupby_rolling_var(self):
self._test_groupby_rolling_func("var")
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_ops_on_diff_frames_groupby_rolling import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
sonnyhu/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
tallakahath/pymatgen | pymatgen/analysis/structure_analyzer.py | 1 | 82945 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import six
import yaml
import os
"""
This module provides classes to perform topological analyses of structures.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Sai Jayaraman"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import math
from math import pi, asin, atan, sqrt, exp, cos
import numpy as np
import itertools
import collections
from warnings import warn
from scipy.spatial import Voronoi
from pymatgen import PeriodicSite
from pymatgen import Element, Specie, Composition
from pymatgen.util.num import abs_cap
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.surface import Slab, SlabGenerator
class VoronoiCoordFinder(object):
"""
Uses a Voronoi algorithm to determine the coordination for each site in a
structure.
Args:
structure (Structure): Input structure
target ([Element/Specie]): A list of target species to determine
coordination for.
cutoff (float): Radius in Angstrom cutoff to look for coordinating
atoms. Defaults to 10.0.
allow_pathological (bool): whether to allow infinite vertices in
determination of Voronoi coordination
"""
def __init__(self, structure, target=None, cutoff=10.0,
allow_pathological=False):
self._structure = structure
self.cutoff = cutoff
self.allow_pathological = allow_pathological
if target is None:
self._target = structure.composition.elements
else:
self._target = target
def get_voronoi_polyhedra(self, n):
"""
Gives a weighted polyhedra around a site. This uses the voronoi
construction with solid angle weights.
See ref: A Proposed Rigorous Definition of Coordination Number,
M. O'Keeffe, Acta Cryst. (1979). A35, 772-775
Args:
n (int): Site index
Returns:
A dict of sites sharing a common Voronoi facet with the site
n and their solid angle weights
"""
localtarget = self._target
center = self._structure[n]
neighbors = self._structure.get_sites_in_sphere(
center.coords, self.cutoff)
neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(qvoronoi_input)
all_vertices = voro.vertices
results = {}
for nn, vind in voro.ridge_dict.items():
if 0 in nn:
if -1 in vind:
if self.allow_pathological:
continue
else:
raise RuntimeError("This structure is pathological,"
" infinite vertex in the voronoi "
"construction")
facets = [all_vertices[i] for i in vind]
results[neighbors[sorted(nn)[1]]] = solid_angle(
center.coords, facets)
maxangle = max(results.values())
resultweighted = {}
for nn, angle in results.items():
            # if the nn site is ordered, use "nn.specie" to get species, else use "nn.species_and_occu"
if nn.is_ordered:
if nn.specie in localtarget:
resultweighted[nn] = angle / maxangle
            else:  # if the nn site is disordered
for disordered_sp in nn.species_and_occu.keys():
if disordered_sp in localtarget:
resultweighted[nn] = angle / maxangle
return resultweighted
def get_coordination_number(self, n):
"""
Returns the coordination number of site with index n.
Args:
n (int): Site index
"""
return sum(self.get_voronoi_polyhedra(n).values())
def get_coordinated_sites(self, n, tol=0, target=None):
"""
Returns the sites that are in the coordination radius of site with
index n.
Args:
n (int): Site index.
tol (float): Weight tolerance to determine if a particular pair is
considered a neighbor.
target (Element): Target element
Returns:
Sites coordinating input site.
"""
coordinated_sites = []
for site, weight in self.get_voronoi_polyhedra(n).items():
if weight > tol and (target is None or site.specie == target):
coordinated_sites.append(site)
return coordinated_sites
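# Hedged usage sketch (illustrative only, kept as comments so nothing runs on
# import): estimate the Voronoi coordination number of the single site in a
# simple-cubic Po cell; a value close to 6 is expected.
# from pymatgen import Lattice, Structure
# s = Structure(Lattice.cubic(3.36), ["Po"], [[0, 0, 0]])
# print(VoronoiCoordFinder(s).get_coordination_number(0))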
class JMolCoordFinder:
"""
Determine coordinated sites and coordination number using an emulation of
JMol's default autoBond() algorithm. This version of the algorithm does not
take into account any information regarding known charge states.
"""
def __init__(self, el_radius_updates=None):
"""
Initialize coordination finder parameters (atomic radii)
Args:
el_radius_updates: (dict) symbol->float to override default atomic
radii table values
"""
# load elemental radii table
bonds_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"bonds_jmol_ob.yaml")
with open(bonds_file, 'r') as f:
self.el_radius = yaml.load(f)
# update any user preference elemental radii
if el_radius_updates:
self.el_radius.update(el_radius_updates)
def get_max_bond_distance(self, el1_sym, el2_sym, constant=0.56):
"""
Use JMol algorithm to determine bond length from atomic parameters
Args:
el1_sym: (str) symbol of atom 1
el2_sym: (str) symbol of atom 2
constant: (float) factor to tune model
Returns: (float) max bond length
"""
return math.sqrt(
(self.el_radius[el1_sym] + self.el_radius[el2_sym] + constant) ** 2)
def get_coordination_number(self, structure, n, tol=1E-3):
"""
Get the coordination number of a site
Args:
structure: (Structure)
n: (int) index of site in the structure to get CN for
tol: (float) a numerical tolerance to extend search
Returns: (int) the coordination number
"""
return len(self.get_coordinated_sites(structure, n, tol))
def get_coordinated_sites(self, structure, n, tol=1E-3):
"""
Get the coordinated sites for a site
Args:
structure: (Structure)
n: (int) index of site in the structure to analyze
tol: (float) a numerical tolerance to extend search
Returns: ([sites]) a list of coordinated sites
"""
site = structure[n]
# determine relevant bond lengths based on atomic radii table
bonds = {}
for el in structure.composition.elements:
bonds[site.specie, el] = self.get_max_bond_distance(
site.specie.symbol, el.symbol)
# search for neighbors up to max bond length + tolerance
max_rad = max(bonds.values()) + tol
all_neighbors = []
for neighb, dist in structure.get_neighbors(site, max_rad):
# confirm neighbor based on bond length specific to atom pair
if dist <= bonds[(site.specie, neighb.specie)] + tol:
all_neighbors.append(neighb)
return all_neighbors
def average_coordination_number(structures, freq=10):
"""
Calculates the ensemble averaged Voronoi coordination numbers
of a list of Structures using VoronoiCoordFinder.
Typically used for analyzing the output of a Molecular Dynamics run.
Args:
structures (list): list of Structures.
freq (int): sampling frequency of coordination number [every freq steps].
Returns:
Dictionary of elements as keys and average coordination numbers as values.
"""
coordination_numbers = {}
for el in structures[0].composition.elements:
coordination_numbers[el.name] = 0.0
count = 0
for t in range(len(structures)):
if t % freq != 0:
continue
count += 1
vor = VoronoiCoordFinder(structures[t])
for atom in range(len(structures[0])):
cn = vor.get_coordination_number(atom)
coordination_numbers[structures[t][atom].species_string] += cn
elements = structures[0].composition.as_dict()
for el in coordination_numbers:
coordination_numbers[el] = coordination_numbers[el] / elements[
el] / count
return coordination_numbers
class VoronoiAnalyzer(object):
"""
Performs a statistical analysis of Voronoi polyhedra around each site.
Each Voronoi polyhedron is described using Schaefli notation.
That is a set of indices {c_i} where c_i is the number of faces with i
number of vertices. E.g. for a bcc crystal, there is only one polyhedron
notation of which is [0,6,0,8,0,0,...].
In perfect crystals, these also corresponds to the Wigner-Seitz cells.
    For distorted crystals, liquids, or amorphous structures there is, rather
    than a single type, a statistical distribution of polyhedra.
See ref: Microstructure and its relaxation in Fe-B amorphous system
simulated by molecular dynamics,
Stepanyuk et al., J. Non-cryst. Solids (1993), 159, 80-87.
Args:
cutoff (float): cutoff distance to search for neighbors of a given atom
(default = 5.0)
qhull_options (str): options to pass to qhull (optional)
"""
def __init__(self, cutoff=5.0, qhull_options="Qbb Qc Qz"):
self.cutoff = cutoff
self.qhull_options = qhull_options
def analyze(self, structure, n=0):
"""
Performs Voronoi analysis and returns the polyhedra around atom n
in Schlaefli notation.
Args:
structure (Structure): structure to analyze
n (int): index of the center atom in structure
Returns:
            voronoi index of n: <c3,c4,c5,c6,c7,c8,c9,c10>
where c_i denotes number of facets with i vertices.
"""
center = structure[n]
neighbors = structure.get_sites_in_sphere(center.coords, self.cutoff)
neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]
qvoronoi_input = np.array([s.coords for s in neighbors])
voro = Voronoi(qvoronoi_input, qhull_options=self.qhull_options)
vor_index = np.array([0, 0, 0, 0, 0, 0, 0, 0])
for key in voro.ridge_dict:
if 0 in key: # This means if the center atom is in key
if -1 in key: # This means if an infinity point is in key
raise ValueError("Cutoff too short.")
else:
try:
vor_index[len(voro.ridge_dict[key]) - 3] += 1
except IndexError:
# If a facet has more than 10 edges, it's skipped here.
pass
return vor_index
def analyze_structures(self, structures, step_freq=10,
most_frequent_polyhedra=15):
"""
Perform Voronoi analysis on a list of Structures.
Note that this might take a significant amount of time depending on the
size and number of structures.
Args:
structures (list): list of Structures
            cutoff (float): cutoff distance around an atom to search for
                neighbors (set in the constructor)
            step_freq (int): perform analysis every step_freq steps
            qhull_options (str): options to pass to qhull (set in the
                constructor)
            most_frequent_polyhedra (int): this many unique polyhedra with
                highest frequencies are stored.
Returns:
A list of tuples in the form (voronoi_index,frequency)
"""
voro_dict = {}
step = 0
for structure in structures:
step += 1
if step % step_freq != 0:
continue
v = []
for n in range(len(structure)):
v.append(str(self.analyze(structure, n=n).view()))
for voro in v:
if voro in voro_dict:
voro_dict[voro] += 1
else:
voro_dict[voro] = 1
return sorted(voro_dict.items(),
key=lambda x: (x[1], x[0]),
reverse=True)[:most_frequent_polyhedra]
@staticmethod
def plot_vor_analysis(voronoi_ensemble):
        t = list(zip(*voronoi_ensemble))
labels = t[0]
val = list(t[1])
tot = np.sum(val)
val = [float(j) / tot for j in val]
pos = np.arange(len(val)) + .5 # the bar centers on the y axis
import matplotlib.pyplot as plt
plt.figure()
plt.barh(pos, val, align='center', alpha=0.5)
plt.yticks(pos, labels)
plt.xlabel('Count')
plt.title('Voronoi Spectra')
plt.grid(True)
return plt
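# Hedged usage sketch (illustrative only, kept as comments): for a perfect bcc
# crystal the analyzer should report the <0 6 0 8 0 0 0 0> Voronoi index
# mentioned in the class docstring (6 square and 8 hexagonal faces of the
# truncated-octahedron Wigner-Seitz cell).
# from pymatgen import Lattice, Structure
# bcc = Structure(Lattice.cubic(3.0), ["Fe", "Fe"],
#                 [[0, 0, 0], [0.5, 0.5, 0.5]])
# print(VoronoiAnalyzer().analyze(bcc, n=0))  # expected: [0 6 0 8 0 0 0 0]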
class RelaxationAnalyzer(object):
"""
This class analyzes the relaxation in a calculation.
"""
def __init__(self, initial_structure, final_structure):
"""
Please note that the input and final structures should have the same
ordering of sites. This is typically the case for most computational
codes.
Args:
initial_structure (Structure): Initial input structure to
calculation.
final_structure (Structure): Final output structure from
calculation.
"""
if final_structure.formula != initial_structure.formula:
raise ValueError("Initial and final structures have different " +
"formulas!")
self.initial = initial_structure
self.final = final_structure
def get_percentage_volume_change(self):
"""
Returns the percentage volume change.
Returns:
Volume change in percentage, e.g., 0.055 implies a 5.5% increase.
"""
initial_vol = self.initial.lattice.volume
final_vol = self.final.lattice.volume
return final_vol / initial_vol - 1
def get_percentage_lattice_parameter_changes(self):
"""
Returns the percentage lattice parameter changes.
Returns:
A dict of the percentage change in lattice parameter, e.g.,
{'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%,
2.1% and -3.1% in the a, b and c lattice parameters respectively.
"""
initial_latt = self.initial.lattice
final_latt = self.final.lattice
d = {l: getattr(final_latt, l) / getattr(initial_latt, l) - 1
for l in ["a", "b", "c"]}
return d
def get_percentage_bond_dist_changes(self, max_radius=3.0):
"""
Returns the percentage bond distance changes for each site up to a
maximum radius for nearest neighbors.
Args:
max_radius (float): Maximum radius to search for nearest
neighbors. This radius is applied to the initial structure,
not the final structure.
Returns:
Bond distance changes as a dict of dicts. E.g.,
{index1: {index2: 0.011, ...}}. For economy of representation, the
index1 is always less than index2, i.e., since bonding between
site1 and siten is the same as bonding between siten and site1,
there is no reason to duplicate the information or computation.
"""
data = collections.defaultdict(dict)
for inds in itertools.combinations(list(range(len(self.initial))), 2):
(i, j) = sorted(inds)
initial_dist = self.initial[i].distance(self.initial[j])
if initial_dist < max_radius:
final_dist = self.final[i].distance(self.final[j])
data[i][j] = final_dist / initial_dist - 1
return data
class VoronoiConnectivity(object):
"""
Computes the solid angles swept out by the shared face of the voronoi
polyhedron between two sites.
Args:
structure (Structure): Input structure
cutoff (float) Cutoff distance.
"""
# Radius in Angstrom cutoff to look for coordinating atoms
def __init__(self, structure, cutoff=10):
self.cutoff = cutoff
self.s = structure
recp_len = np.array(self.s.lattice.reciprocal_lattice.abc)
i = np.ceil(cutoff * recp_len / (2 * math.pi))
offsets = np.mgrid[-i[0]:i[0] + 1, -i[1]:i[1] + 1, -i[2]:i[2] + 1].T
self.offsets = np.reshape(offsets, (-1, 3))
# shape = [image, axis]
self.cart_offsets = self.s.lattice.get_cartesian_coords(self.offsets)
@property
def connectivity_array(self):
"""
Provides connectivity array.
Returns:
connectivity: An array of shape [atomi, atomj, imagej]. atomi is
the index of the atom in the input structure. Since the second
atom can be outside of the unit cell, it must be described
by both an atom index and an image index. Array data is the
solid angle of polygon between atomi and imagej of atomj
"""
# shape = [site, axis]
cart_coords = np.array(self.s.cart_coords)
# shape = [site, image, axis]
all_sites = cart_coords[:, None, :] + self.cart_offsets[None, :, :]
vt = Voronoi(all_sites.reshape((-1, 3)))
n_images = all_sites.shape[1]
cs = (len(self.s), len(self.s), len(self.cart_offsets))
connectivity = np.zeros(cs)
vts = np.array(vt.vertices)
for (ki, kj), v in vt.ridge_dict.items():
atomi = ki // n_images
atomj = kj // n_images
imagei = ki % n_images
imagej = kj % n_images
if imagei != n_images // 2 and imagej != n_images // 2:
continue
if imagei == n_images // 2:
# atomi is in original cell
val = solid_angle(vt.points[ki], vts[v])
connectivity[atomi, atomj, imagej] = val
if imagej == n_images // 2:
# atomj is in original cell
val = solid_angle(vt.points[kj], vts[v])
connectivity[atomj, atomi, imagei] = val
if -10.101 in vts[v]:
warn('Found connectivity with infinite vertex. '
'Cutoff is too low, and results may be '
'incorrect')
return connectivity
@property
def max_connectivity(self):
"""
returns the 2d array [sitei, sitej] that represents
the maximum connectivity of site i to any periodic
image of site j
"""
return np.max(self.connectivity_array, axis=2)
def get_connections(self):
"""
Returns a list of site pairs that are Voronoi Neighbors, along
with their real-space distances.
"""
con = []
maxconn = self.max_connectivity
for ii in range(0, maxconn.shape[0]):
for jj in range(0, maxconn.shape[1]):
if maxconn[ii][jj] != 0:
dist = self.s.get_distance(ii, jj)
con.append([ii, jj, dist])
return con
def get_sitej(self, site_index, image_index):
"""
Assuming there is some value in the connectivity array at indices
(1, 3, 12). sitei can be obtained directly from the input structure
(structure[1]). sitej can be obtained by passing 3, 12 to this function
Args:
site_index (int): index of the site (3 in the example)
image_index (int): index of the image (12 in the example)
"""
atoms_n_occu = self.s[site_index].species_and_occu
lattice = self.s.lattice
coords = self.s[site_index].frac_coords + self.offsets[image_index]
return PeriodicSite(atoms_n_occu, coords, lattice)
def solid_angle(center, coords):
"""
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle.
"""
o = np.array(center)
r = [np.array(c) - o for c in coords]
r.append(r[0])
n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
n.append(np.cross(r[1], r[0]))
vals = []
for i in range(len(n) - 1):
v = -np.dot(n[i], n[i + 1]) \
/ (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
vals.append(math.acos(abs_cap(v)))
phi = sum(vals)
return phi + (3 - len(r)) * math.pi
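# Hedged check (illustrative only, kept as a comment): the solid angle
# subtended by one face of a cube, seen from the cube's center, is
# 4*pi/6 ~= 2.0944 sr, so with the face vertices given in cyclic order
# solid_angle([0, 0, 0],
#             [[1, 1, 1], [1, -1, 1], [1, -1, -1], [1, 1, -1]])
# should return a value close to 2.0944.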
def get_max_bond_lengths(structure, el_radius_updates=None):
"""
Provides max bond length estimates for a structure based on the JMol
table and algorithms.
Args:
structure: (structure)
el_radius_updates: (dict) symbol->float to update atomic radii
Returns: (dict) - (Element1, Element2) -> float. The two elements are
ordered by Z.
"""
jmc = JMolCoordFinder(el_radius_updates)
bonds_lens = {}
els = sorted(structure.composition.elements, key=lambda x: x.Z)
for i1 in range(len(els)):
for i2 in range(len(els) - i1):
bonds_lens[els[i1], els[i1 + i2]] = jmc.get_max_bond_distance(
els[i1].symbol, els[i1 + i2].symbol)
return bonds_lens
def get_dimensionality(structure, max_hkl=2, el_radius_updates=None,
min_slab_size=5, min_vacuum_size=5,
standardize=True):
"""
This method returns whether a structure is 3D, 2D (layered), or 1D (linear
chains or molecules) according to the algorithm published in Gorai, P.,
Toberer, E. & Stevanovic, V. Computational Identification of Promising
Thermoelectric Materials Among Known Quasi-2D Binary Compounds. J. Mater.
Chem. A 2, 4136 (2016).
Note that a 1D structure detection might indicate problems in the bonding
algorithm, particularly for ionic crystals (e.g., NaCl)
Args:
structure: (Structure) structure to analyze dimensionality for
max_hkl: (int) max index of planes to look for layers
el_radius_updates: (dict) symbol->float to update atomic radii
min_slab_size: (float) internal surface construction parameter
min_vacuum_size: (float) internal surface construction parameter
standardize (bool): whether to standardize the structure before
analysis. Set to False only if you already have the structure in a
convention where layers / chains will be along low <hkl> indexes.
Returns: (int) the dimensionality of the structure - 1 (molecules/chains),
2 (layered), or 3 (3D)
"""
if standardize:
structure = SpacegroupAnalyzer(structure).\
get_conventional_standard_structure()
bonds = get_max_bond_lengths(structure)
num_surfaces = 0
for h in range(max_hkl):
for k in range(max_hkl):
for l in range(max_hkl):
if max([h, k, l]) > 0 and num_surfaces < 2:
sg = SlabGenerator(structure, (h, k, l),
min_slab_size=min_slab_size,
min_vacuum_size=min_vacuum_size)
slabs = sg.get_slabs(bonds)
for _ in slabs:
num_surfaces += 1
return 3 - min(num_surfaces, 2)
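# Hedged usage sketch (illustrative only; "MoS2.cif" is a placeholder file):
# a layered material such as 2H-MoS2 would be expected to return 2, while a
# rocksalt structure like NaCl may be misclassified, as noted in the docstring.
# from pymatgen import Structure
# print(get_dimensionality(Structure.from_file("MoS2.cif")))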
def contains_peroxide(structure, relative_cutoff=1.1):
"""
Determines if a structure contains peroxide anions.
Args:
structure (Structure): Input structure.
relative_cutoff: The peroxide bond distance is 1.49 Angstrom.
Relative_cutoff * 1.49 stipulates the maximum distance two O
atoms must be to each other to be considered a peroxide.
Returns:
Boolean indicating if structure contains a peroxide anion.
"""
ox_type = oxide_type(structure, relative_cutoff)
if ox_type == "peroxide":
return True
else:
return False
class OxideType(object):
"""
Separate class for determining oxide type.
Args:
structure: Input structure.
relative_cutoff: Relative_cutoff * act. cutoff stipulates the max.
distance two O atoms must be from each other. Default value is
1.1. At most 1.1 is recommended, nothing larger, otherwise the
script cannot distinguish between superoxides and peroxides.
"""
def __init__(self, structure, relative_cutoff=1.1):
self.structure = structure
self.relative_cutoff = relative_cutoff
self.oxide_type, self.nbonds = self.parse_oxide()
def parse_oxide(self):
"""
Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide.
Returns:
oxide_type (str): Type of oxide
ozonide/peroxide/superoxide/hydroxide/None.
nbonds (int): Number of peroxide/superoxide/hydroxide bonds in
structure.
"""
structure = self.structure
relative_cutoff = self.relative_cutoff
o_sites_frac_coords = []
h_sites_frac_coords = []
lattice = structure.lattice
if isinstance(structure.composition.elements[0], Element):
comp = structure.composition
elif isinstance(structure.composition.elements[0], Specie):
elmap = collections.defaultdict(float)
for site in structure:
for species, occu in site.species_and_occu.items():
elmap[species.element] += occu
comp = Composition(elmap)
if Element("O") not in comp or comp.is_element:
return "None", 0
for site in structure:
syms = [sp.symbol for sp in site.species_and_occu.keys()]
if "O" in syms:
o_sites_frac_coords.append(site.frac_coords)
if "H" in syms:
h_sites_frac_coords.append(site.frac_coords)
if h_sites_frac_coords:
dist_matrix = lattice.get_all_distances(o_sites_frac_coords,
h_sites_frac_coords)
if np.any(dist_matrix < relative_cutoff * 0.93):
return "hydroxide", len(
np.where(dist_matrix < relative_cutoff * 0.93)[0]) / 2.0
dist_matrix = lattice.get_all_distances(o_sites_frac_coords,
o_sites_frac_coords)
np.fill_diagonal(dist_matrix, 1000)
is_superoxide = False
is_peroxide = False
is_ozonide = False
if np.any(dist_matrix < relative_cutoff * 1.35):
bond_atoms = np.where(dist_matrix < relative_cutoff * 1.35)[0]
is_superoxide = True
elif np.any(dist_matrix < relative_cutoff * 1.49):
is_peroxide = True
bond_atoms = np.where(dist_matrix < relative_cutoff * 1.49)[0]
if is_superoxide:
if len(bond_atoms) > len(set(bond_atoms)):
is_superoxide = False
is_ozonide = True
try:
nbonds = len(set(bond_atoms))
except UnboundLocalError:
nbonds = 0.0
if is_ozonide:
str_oxide = "ozonide"
elif is_superoxide:
str_oxide = "superoxide"
elif is_peroxide:
str_oxide = "peroxide"
else:
str_oxide = "oxide"
if str_oxide == "oxide":
nbonds = comp["O"]
return str_oxide, nbonds
def oxide_type(structure, relative_cutoff=1.1, return_nbonds=False):
"""
Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide
Args:
structure (Structure): Input structure.
relative_cutoff (float): Relative_cutoff * act. cutoff stipulates the
max distance two O atoms must be from each other.
return_nbonds (bool): Should number of bonds be requested?
"""
ox_obj = OxideType(structure, relative_cutoff)
if return_nbonds:
return ox_obj.oxide_type, ox_obj.nbonds
else:
return ox_obj.oxide_type
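# Hedged usage sketch (illustrative only; "Li2O2.cif" is a placeholder file):
# lithium peroxide has O-O distances of roughly 1.5 Angstrom, so it would be
# expected to be classified as "peroxide".
# from pymatgen import Structure
# print(oxide_type(Structure.from_file("Li2O2.cif")))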
def sulfide_type(structure):
"""
Determines if a structure is a sulfide/polysulfide
Args:
structure (Structure): Input structure.
Returns:
(str) sulfide/polysulfide/sulfate
"""
structure = structure.copy()
structure.remove_oxidation_states()
s = Element("S")
comp = structure.composition
if comp.is_element or s not in comp:
return None
finder = SpacegroupAnalyzer(structure, symprec=0.1)
symm_structure = finder.get_symmetrized_structure()
s_sites = [sites[0] for sites in symm_structure.equivalent_sites if sites[0].specie == s]
def process_site(site):
neighbors = structure.get_neighbors(site, 4)
neighbors = sorted(neighbors, key=lambda n: n[1])
nn, dist = neighbors[0]
coord_elements = [site.specie for site, d in neighbors
if d < dist + 0.4][:4]
avg_electroneg = np.mean([e.X for e in coord_elements])
if avg_electroneg > s.X:
return "sulfate"
elif avg_electroneg == s.X and s in coord_elements:
return "polysulfide"
else:
return "sulfide"
types = set([process_site(site) for site in s_sites])
if "sulfate" in types:
return None
elif "polysulfide" in types:
return "polysulfide"
else:
return "sulfide"
def gramschmidt(vin, uin):
"""
Returns that part of the first input vector
that is orthogonal to the second input vector.
The output vector is not normalized.
Args:
vin (numpy array):
first input vector
uin (numpy array):
second input vector
"""
vin_uin = np.inner(vin, uin)
uin_uin = np.inner(uin, uin)
if uin_uin <= 0.0:
raise ValueError("Zero or negative inner product!")
return vin - (vin_uin / uin_uin) * uin
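# Hedged example (illustrative only, kept as a comment):
# gramschmidt(np.array([1.0, 1.0, 0.0]), np.array([1.0, 0.0, 0.0]))
# returns [0.0, 1.0, 0.0], i.e. the part of the first vector that is
# orthogonal to the second.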
class OrderParameters(object):
"""
This class permits the calculation of various types of local order
parameters.
"""
__supported_types = (
"cn", "lin", "bent", "tet", "oct", "bcc", "q2", "q4", "q6",
"reg_tri", "sq", "sq_pyr", "tri_bipyr")
def __init__(self, types, parameters=None, cutoff=-10.0):
"""
Create an OrderParameter analyzer instance.
Args:
types ([string]):
List of strings representing the types of order parameters
to be calculated. Note that multiple mentions of the
same type may occur. Currently available types are
"cn" (simple coordination number---normalized,
if desired),
"lin" [Peters-style OP recognizing linear coordination
(Zimmermann & Jain, in progress, 2017)],
"bent" [Peters-style OP recognizing bent coordination
(Zimmermann & Jain, in progress, 2017)],
"tet" [Peters-style OP recognizing tetrahedral
coordination (Zimmermann et al.,
J. Am. Chem. Soc., 137, 13352-13361, 2015)],
"oct" [Peters-style OP recognizing octahedral
coordination (Zimmermann et al.,
J. Am. Chem. Soc., 137, 13352-13361, 2015)],
"bcc" [Peters-style OP recognizing local
body-centered cubic environment (Peters,
J. Chem. Phys., 131, 244103, 2009)],
"reg_tri" (OP recognizing coordination with a regular triangle),
"sq" (OP recognizing square coordination),
"sq_pyr" (OP recognizing square pyramidal coordination),
"tri_bipyr" (OP recognizing trigonal bipyramidal coord.),
"q2" [Bond orientational order parameter (BOOP)
of weight l=2 (Steinhardt et al., Phys. Rev. B,
28, 784-805, 1983)],
"q4" (BOOP of weight l=4),
"q6" (BOOP of weight l=6).
parameters ([[float]]):
2D list of floating point numbers that store
parameters associated with the different order parameters
that are to be calculated (1st dimension = length of
types tuple; any 2nd dimension may be zero, in which case
default values are used). In the following, those order
parameters q_i are listed that require further parameters
for their computation (values in brackets denote default
values):
"cn": normalizing constant (1);
"lin": Gaussian width in fractions of pi (180 degrees)
reflecting the "speed of penalizing" deviations
away from 180 degrees of any individual
neighbor1-center-neighbor2 configuration (0.0667);
"bent": target angle in degrees (180);
Gaussian width for penalizing deviations away
from perfect target angle in fractions of pi
(0.0667);
"tet": Gaussian width for penalizing deviations away
perfecttetrahedral angle (0.0667);
"oct": threshold angle in degrees distinguishing a second
neighbor to be either close to the south pole or
close to the equator (160.0);
Gaussian width for penalizing deviations away
from south pole (0.0667);
Gaussian width for penalizing deviations away
from equator (0.0556);
constant for shifting q_oct toward smaller
values, which can be helpful when trying to fine-
tune the capabilities of distinguishing between
different environments (e.g., tet vs oct)
given a single mutual threshold q_thresh;
"bcc": south-pole threshold angle as for "oct" (160.0);
south-pole Gaussian width as for "oct" (0.0667);
"reg_tri": Gaussian width for penalizing angles away from
the expected angles, given the estimated
height-to-side ratio of the trigonal pyramid
in which the central atom is located at the
tip (0.0222);
"sq": Gaussian width for penalizing angles away from
the expected angles, given the estimated
height-to-diagonal ratio of the pyramid in which
the central atom is located at the tip
(0.0333);
"sq_pyr": Gaussian width in fractions of pi
for penalizing angles away from 90 degrees
(0.0333);
Gaussian width in Angstrom for penalizing
variations in neighbor distances (0.1);
"tri_bipyr": threshold angle to identify close to
South pole positions (160.0, cf., oct).
Gaussian width for penalizing deviations away
from south pole (0.0667);
Gaussian width for penalizing deviations away
from equator (0.0556).
cutoff (float):
Cutoff radius to determine which nearest neighbors are
supposed to contribute to the order parameters.
If the value is negative the neighboring sites found by
distance and cutoff radius are further
pruned using the get_coordinated_sites method from the
VoronoiCoordFinder class.
"""
if len(types) == 0:
raise ValueError("Empty types list!")
for t in types:
if t not in OrderParameters.__supported_types:
raise ValueError("Unknown order parameter type (" + \
t + ")!")
if parameters is not None:
if len(types) != len(parameters):
raise ValueError("1st dimension of parameters array is not"
" consistent with types list!")
for lp in parameters:
if len(lp) > 0:
for p in lp:
if type(p) != float and type(p) != int:
raise AttributeError("Expected only float and"
" integer type parameters!")
loc_parameters = list(parameters)
else:
loc_parameters = [[] for t in types]
self._types = tuple(types)
tmpparas = []
self._computerijs = self._computerjks = self._geomops = False
self._geomops2 = self._boops = False
self._max_trig_order = -1
for i, t in enumerate(self._types):
# add here any additional parameter checking and
# default value assignment
tmpparas.append([])
if t == "cn":
if len(loc_parameters[i]) == 0:
tmpparas[i].append(1.0)
else:
if loc_parameters[i][0] == 0.0:
raise ValueError("Normalizing constant for"
" coordination-number based order"
" parameter is zero!")
else:
tmpparas[i].append(loc_parameters[i][0])
elif t == "lin":
if len(loc_parameters[i]) == 0:
tmpparas[i] = [1.0 / 0.0667]
else:
if loc_parameters[i][0] == 0.0:
raise ValueError("Gaussian width for"
" linear order"
" parameter is zero!")
else:
tmpparas[i] = [1.0 / loc_parameters[i][0]]
elif t == "bent":
if len(loc_parameters[i]) == 0:
tmpparas[i] = [1.0, 1.0 / 0.0667]
else:
if loc_parameters[i][0] <= 0.0 or loc_parameters[i][
0] > 180.0:
warn("Target angle for bent order parameter is"
" not in ]0,180] interval.")
if loc_parameters[i][1] == 0.0:
raise ValueError("Gaussian width for"
" bent order"
" parameter is zero!")
else:
tmpparas[i] = [loc_parameters[i][0] / 180.0, \
1.0 / loc_parameters[i][1]]
elif t == "tet":
if len(loc_parameters[i]) == 0:
tmpparas[i].append(1.0 / 0.0667)
else:
if loc_parameters[i][0] == 0.0:
raise ValueError("Gaussian width for"
" tetrahedral order"
" parameter is zero!")
else:
tmpparas[i].append(1.0 / loc_parameters[i][0])
elif t == "oct":
if len(loc_parameters[i]) < 4:
tmpparas[i].append(8.0 * pi / 9.0)
tmpparas[i].append(1.0 / 0.0667)
tmpparas[i].append(1.0 / 0.0556)
tmpparas[i].append(0.25)
tmpparas[i].append(4.0 / 3.0)
else:
if loc_parameters[i][0] <= 0.0 or loc_parameters[i][
0] >= 180.0:
warn("Threshold value for south pole"
" configurations in octahedral order"
" parameter outside ]0,180[")
tmpparas[i].append(loc_parameters[i][0] * pi / 180.0)
if loc_parameters[i][1] == 0.0:
raise ValueError("Gaussian width for south pole"
" configurations in octahedral"
" order parameter is zero!")
else:
tmpparas[i].append(1.0 / loc_parameters[i][1])
if loc_parameters[i][2] == 0.0:
raise ValueError("Gaussian width for equatorial"
" configurations in octahedral"
" order parameter is zero!")
else:
tmpparas[i].append(1.0 / loc_parameters[i][2])
if loc_parameters[i][3] - 1.0 == 0.0:
raise ValueError("Shift constant may not be"
" unity!")
if loc_parameters[i][3] < 0.0 or loc_parameters[i][3] > 1.0:
warn("Shift constant outside [0,1[.")
tmpparas[i].append(loc_parameters[i][3])
tmpparas[i].append(1.0 / (1.0 - loc_parameters[i][3]))
elif t == "bcc":
if len(loc_parameters[i]) < 2:
tmpparas[i].append(8.0 * pi / 9.0)
tmpparas[i].append(1.0 / 0.0667)
else:
if loc_parameters[i][0] <= 0.0 or loc_parameters[i][
0] >= 180.0:
warn("Threshold value for south pole"
" configurations in bcc order"
" parameter outside ]0,180[")
tmpparas[i].append(loc_parameters[i][0] * pi / 180.0)
if loc_parameters[i][1] == 0.0:
raise ValueError("Gaussian width for south pole"
" configurations in bcc"
" order parameter is zero!")
else:
tmpparas[i].append(1.0 / loc_parameters[i][1])
elif t == "reg_tri":
if len(loc_parameters[i]) == 0:
tmpparas[i] = [1.0 / 0.0222]
else:
if loc_parameters[i][0] == 0.0:
raise ValueError("Gaussian width for angles in"
" trigonal pyramid tip of regular triangle"
" order parameter is zero!")
tmpparas[i] = [1.0 / loc_parameters[i][0]]
elif t == "sq":
if len(loc_parameters[i]) == 0:
tmpparas[i] = [1.0 / 0.0333]
else:
if loc_parameters[i][0] == 0.0:
raise ValueError("Gaussian width for angles in"
" pyramid tip of square order parameter"
" is zero!")
tmpparas[i] = [1.0 / loc_parameters[i][0]]
elif t == "sq_pyr":
if len(loc_parameters[i]) == 0:
tmpparas[i] = [1.0 / 0.0333, 1.0 / 0.1]
else:
if loc_parameters[i][0] == 0.0:
raise ValueError("Gaussian width for angles in"
" square pyramid order parameter is zero!")
                    if loc_parameters[i][1] == 0.0:
raise ValueError("Gaussian width for lengths in"
" square pyramid order parameter is zero!")
tmpparas[i] = [1.0 / loc_parameters[i][0], \
1.0 / loc_parameters[i][1]]
elif t == "tri_bipyr":
if len(loc_parameters[i]) < 3:
tmpparas[i].append(8.0 * pi / 9.0)
tmpparas[i].append(1.0 / 0.0667)
tmpparas[i].append(1.0 / 0.0741)
else:
if loc_parameters[i][0] <= 0.0 or loc_parameters[i][
0] >= 180.0:
warn("Threshold value for south pole"
" configurations in octahedral order"
" parameter outside ]0,180[")
tmpparas[i].append(loc_parameters[i][0] * pi / 180.0)
if loc_parameters[i][1] == 0.0:
raise ValueError("Gaussian width for south pole"
" configurations in trigonal"
" bipyramidal order parameter is"
" zero!")
else:
tmpparas[i].append(1.0 / loc_parameters[i][1])
if loc_parameters[i][2] == 0.0:
raise ValueError("Gaussian width for equatorial"
" configurations in trigonal"
" bipyramidal order parameter"
" is zero!")
else:
tmpparas[i].append(1.0 / loc_parameters[i][2])
# All following types should be well-defined/-implemented,
# and they should not require parameters.
elif t != "q2" and t != "q4" and t != "q6":
raise ValueError("unknown order-parameter type \"" + t + "\"")
# Add here any additional flags to be used during calculation.
            # self._computerijs: compute vectors from central atom i
            # to any neighbor j.
            # self._computerjks: compute vectors from non-central atom j
            # to any non-central atom k.
if t == "tet" or t == "oct" or t == "bcc" or t == "sq_pyr" or \
t == "tri_bipyr":
self._computerijs = self._geomops = True
if t == "reg_tri" or t =="sq":
self._computerijs = self._computerjks = self._geomops2 = True
if t == "q2" or t == "q4" or t == "q6":
self._computerijs = self._boops = True
if t == "q2" and self._max_trig_order < 2:
self._max_trig_order = 2
if t == "q4" and self._max_trig_order < 4:
self._max_trig_order = 4
if t == "q6" and self._max_trig_order < 6:
self._max_trig_order = 6
# Finish parameter treatment.
self._paras = list(tmpparas)
if cutoff < 0.0:
self._cutoff = -cutoff
self._voroneigh = True
elif cutoff > 0.0:
self._cutoff = cutoff
self._voroneigh = False
else:
raise ValueError("Cutoff radius is zero!")
# Further variable definitions.
self._last_nneigh = -1
self._pow_sin_t = {}
self._pow_cos_t = {}
self._sin_n_p = {}
self._cos_n_p = {}
@property
def num_ops(self):
""""
Returns the number of different order parameters that are targeted
to be calculated.
"""
return len(self._types)
@property
def last_nneigh(self):
""""
Returns the number of neighbors encountered during the most
recent order-parameter calculation. A value of -1 indicates that
no such calculation has yet been performed for this instance.
"""
        return self._last_nneigh
def compute_trigonometric_terms(self, thetas, phis):
""""
Computes trigonometric terms that are required to
calculate bond orientational order parameters.
Args:
thetas ([float]):
polar angles of all neighbors in radians.
phis ([float]):
azimuth angles of all neighbors in radians. The list of
azimuth angles is expected to have the same size as the list
of polar angles; otherwise, a ValueError is raised. Also,
the two lists of angles have to be coherent in order. That
is, it is expected that the order in the list of azimuth
angles corresponds to a distinct sequence of neighbors.
And, this sequence has to equal the sequence
of neighbors in the list of polar angles.
"""
if len(thetas) != len(phis):
raise ValueError("List of polar and azimuthal angles have to be"
" equal!")
self._pow_sin_t.clear()
self._pow_cos_t.clear()
self._sin_n_p.clear()
self._cos_n_p.clear()
self._pow_sin_t[1] = [math.sin(float(t)) for t in thetas]
self._pow_cos_t[1] = [math.cos(float(t)) for t in thetas]
self._sin_n_p[1] = [math.sin(float(p)) for p in phis]
self._cos_n_p[1] = [math.cos(float(p)) for p in phis]
for i in range(2, self._max_trig_order + 1):
self._pow_sin_t[i] = [e[0] * e[1] for e in zip(
self._pow_sin_t[i - 1], self._pow_sin_t[1])]
self._pow_cos_t[i] = [e[0] * e[1] for e in zip(
self._pow_cos_t[i - 1], self._pow_cos_t[1])]
self._sin_n_p[i] = [math.sin(float(i) * float(p)) \
for p in phis]
self._cos_n_p[i] = [math.cos(float(i) * float(p)) \
for p in phis]
def get_q2(self, thetas=None, phis=None):
"""
Calculates the value of the bond orientational order parameter of
weight l=2. If the function is called with non-empty lists of
polar and azimuthal angles the corresponding trigonometric terms
are computed afresh. Otherwise, it is expected that the
compute_trigonometric_terms function has been just called.
Args:
thetas ([float]):
polar angles of all neighbors in radians.
phis ([float]):
azimuth angles of all neighbors in radians.
Return:
q2 (float): bond orientational order parameter of weight l=2
corresponding to the input angles thetas and phis.
"""
if thetas is not None and phis is not None:
self.compute_trigonometric_terms(thetas, phis)
nnn = len(self._pow_sin_t[1])
nnn_range = range(nnn)
sqrt_15_2pi = math.sqrt(15.0 / (2.0 * pi))
sqrt_5_pi = math.sqrt(5.0 / pi)
pre_y_2_2 = [0.25 * sqrt_15_2pi * val for val in self._pow_sin_t[2]]
pre_y_2_1 = [0.5 * sqrt_15_2pi * val[0] * val[1]
for val in zip(self._pow_sin_t[1], self._pow_cos_t[1])]
acc = 0.0
# Y_2_-2
real = imag = 0.0
for i in nnn_range:
real += pre_y_2_2[i] * self._cos_n_p[2][i]
imag -= pre_y_2_2[i] * self._sin_n_p[2][i]
acc += (real * real + imag * imag)
# Y_2_-1
real = imag = 0.0
for i in nnn_range:
real += pre_y_2_1[i] * self._cos_n_p[1][i]
imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
acc += (real * real + imag * imag)
# Y_2_0
real = imag = 0.0
for i in nnn_range:
real += 0.25 * sqrt_5_pi * (3.0 * self._pow_cos_t[2][i] - 1.0)
acc += (real * real)
# Y_2_1
real = imag = 0.0
for i in nnn_range:
real -= pre_y_2_1[i] * self._cos_n_p[1][i]
imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
acc += (real * real + imag * imag)
# Y_2_2
real = imag = 0.0
for i in nnn_range:
real += pre_y_2_2[i] * self._cos_n_p[2][i]
imag += pre_y_2_2[i] * self._sin_n_p[2][i]
acc += (real * real + imag * imag)
q2 = math.sqrt(4.0 * pi * acc / (5.0 * float(nnn * nnn)))
return q2
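    # Clarifying note (added; not in the original source): get_q2, get_q4 and
    # get_q6 all evaluate the rotationally invariant Steinhardt bond
    # orientational order parameter
    #     q_l = sqrt((4*pi / (2*l + 1)) * sum_m |(1/N) * sum_j Y_lm(theta_j, phi_j)|^2),
    # which is what the final line of each method computes; e.g., for l = 2
    # the prefactor 2*l + 1 = 5 appears in sqrt(4.0 * pi * acc / (5.0 * N^2)).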
def get_q4(self, thetas=None, phis=None):
"""
Calculates the value of the bond orientational order parameter of
weight l=4. If the function is called with non-empty lists of
polar and azimuthal angles the corresponding trigonometric terms
are computed afresh. Otherwise, it is expected that the
compute_trigonometric_terms function has been just called.
Args:
thetas ([float]):
polar angles of all neighbors in radians.
phis ([float]):
azimuth angles of all neighbors in radians.
Return:
q4 (float): bond orientational order parameter of weight l=4
corresponding to the input angles thetas and phis.
"""
if thetas is not None and phis is not None:
self.compute_trigonometric_terms(thetas, phis)
nnn = len(self._pow_sin_t[1])
nnn_range = range(nnn)
i16_3 = 3.0 / 16.0
i8_3 = 3.0 / 8.0
sqrt_35_pi = math.sqrt(35.0 / pi)
sqrt_35_2pi = math.sqrt(35.0 / (2.0 * pi))
sqrt_5_pi = math.sqrt(5.0 / pi)
sqrt_5_2pi = math.sqrt(5.0 / (2.0 * pi))
sqrt_1_pi = math.sqrt(1.0 / pi)
pre_y_4_4 = [i16_3 * sqrt_35_2pi * val for val in self._pow_sin_t[4]]
pre_y_4_3 = [i8_3 * sqrt_35_pi * val[0] * val[1] \
for val in zip(self._pow_sin_t[3], self._pow_cos_t[1])]
pre_y_4_2 = [i8_3 * sqrt_5_2pi * val[0] * (7.0 * val[1] - 1.0) \
for val in zip(self._pow_sin_t[2], self._pow_cos_t[2])]
pre_y_4_1 = [i8_3 * sqrt_5_pi * val[0] * (7.0 * val[1] - 3.0 * val[2]) \
for val in zip(self._pow_sin_t[1], self._pow_cos_t[3], \
self._pow_cos_t[1])]
acc = 0.0
# Y_4_-4
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_4[i] * self._cos_n_p[4][i]
imag -= pre_y_4_4[i] * self._sin_n_p[4][i]
acc += (real * real + imag * imag)
# Y_4_-3
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_3[i] * self._cos_n_p[3][i]
imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
acc += (real * real + imag * imag)
# Y_4_-2
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_2[i] * self._cos_n_p[2][i]
imag -= pre_y_4_2[i] * self._sin_n_p[2][i]
acc += (real * real + imag * imag)
# Y_4_-1
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_1[i] * self._cos_n_p[1][i]
imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
acc += (real * real + imag * imag)
# Y_4_0
real = imag = 0.0
for i in nnn_range:
real += i16_3 * sqrt_1_pi * (35.0 * self._pow_cos_t[4][i] - \
30.0 * self._pow_cos_t[2][i] + 3.0)
acc += (real * real)
# Y_4_1
real = imag = 0.0
for i in nnn_range:
real -= pre_y_4_1[i] * self._cos_n_p[1][i]
imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
acc += (real * real + imag * imag)
# Y_4_2
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_2[i] * self._cos_n_p[2][i]
imag += pre_y_4_2[i] * self._sin_n_p[2][i]
acc += (real * real + imag * imag)
# Y_4_3
real = imag = 0.0
for i in nnn_range:
real -= pre_y_4_3[i] * self._cos_n_p[3][i]
imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
acc += (real * real + imag * imag)
# Y_4_4
real = imag = 0.0
for i in nnn_range:
real += pre_y_4_4[i] * self._cos_n_p[4][i]
imag += pre_y_4_4[i] * self._sin_n_p[4][i]
acc += (real * real + imag * imag)
q4 = math.sqrt(4.0 * pi * acc / (9.0 * float(nnn * nnn)))
return q4
def get_q6(self, thetas=None, phis=None):
"""
Calculates the value of the bond orientational order parameter of
weight l=6. If the function is called with non-empty lists of
polar and azimuthal angles the corresponding trigonometric terms
are computed afresh. Otherwise, it is expected that the
compute_trigonometric_terms function has been just called.
Args:
thetas ([float]):
polar angles of all neighbors in radians.
phis ([float]):
azimuth angles of all neighbors in radians.
Return:
q6 (float): bond orientational order parameter of weight l=6
corresponding to the input angles thetas and phis.
"""
if thetas is not None and phis is not None:
self.compute_trigonometric_terms(thetas, phis)
nnn = len(self._pow_sin_t[1])
nnn_range = range(nnn)
i64 = 1.0 / 64.0
i32 = 1.0 / 32.0
i32_3 = 3.0 / 32.0
i16 = 1.0 / 16.0
sqrt_3003_pi = math.sqrt(3003.0 / pi)
sqrt_1001_pi = math.sqrt(1001.0 / pi)
sqrt_91_2pi = math.sqrt(91.0 / (2.0 * pi))
sqrt_1365_pi = math.sqrt(1365.0 / pi)
sqrt_273_2pi = math.sqrt(273.0 / (2.0 * pi))
sqrt_13_pi = math.sqrt(13.0 / pi)
pre_y_6_6 = [i64 * sqrt_3003_pi * val for val in self._pow_sin_t[6]]
pre_y_6_5 = [i32_3 * sqrt_1001_pi * val[0] * val[1] \
for val in zip(self._pow_sin_t[5], self._pow_cos_t[1])]
pre_y_6_4 = [i32_3 * sqrt_91_2pi * val[0] * (11.0 * val[1] - 1.0) \
for val in zip(self._pow_sin_t[4], self._pow_cos_t[2])]
pre_y_6_3 = [
i32 * sqrt_1365_pi * val[0] * (11.0 * val[1] - 3.0 * val[2]) \
for val in zip(self._pow_sin_t[3], self._pow_cos_t[3], \
self._pow_cos_t[1])]
pre_y_6_2 = [i64 * sqrt_1365_pi * val[0] * (33.0 * val[1] - \
18.0 * val[2] + 1.0) for val
in zip(self._pow_sin_t[2], \
self._pow_cos_t[4], self._pow_cos_t[2])]
pre_y_6_1 = [i16 * sqrt_273_2pi * val[0] * (33.0 * val[1] - \
30.0 * val[2] + 5.0 * val[
3]) for val in
zip(self._pow_sin_t[1], \
self._pow_cos_t[5], self._pow_cos_t[3],
self._pow_cos_t[1])]
acc = 0.0
# Y_6_-6
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_6[i] * self._cos_n_p[6][i] # cos(x) = cos(-x)
imag -= pre_y_6_6[i] * self._sin_n_p[6][i] # sin(x) = -sin(-x)
acc += (real * real + imag * imag)
# Y_6_-5
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_5[i] * self._cos_n_p[5][i]
imag -= pre_y_6_5[i] * self._sin_n_p[5][i]
acc += (real * real + imag * imag)
# Y_6_-4
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_4[i] * self._cos_n_p[4][i]
imag -= pre_y_6_4[i] * self._sin_n_p[4][i]
acc += (real * real + imag * imag)
# Y_6_-3
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_3[i] * self._cos_n_p[3][i]
imag -= pre_y_6_3[i] * self._sin_n_p[3][i]
acc += (real * real + imag * imag)
# Y_6_-2
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_2[i] * self._cos_n_p[2][i]
imag -= pre_y_6_2[i] * self._sin_n_p[2][i]
acc += (real * real + imag * imag)
# Y_6_-1
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_1[i] * self._cos_n_p[1][i]
imag -= pre_y_6_1[i] * self._sin_n_p[1][i]
acc += (real * real + imag * imag)
# Y_6_0
        real = imag = 0.0
for i in nnn_range:
real += i32 * sqrt_13_pi * (231.0 * self._pow_cos_t[6][i] - \
315.0 * self._pow_cos_t[4][i] + 105.0 *
self._pow_cos_t[2][i] - 5.0)
acc += (real * real)
# Y_6_1
        real = imag = 0.0
for i in nnn_range:
real -= pre_y_6_1[i] * self._cos_n_p[1][i]
imag -= pre_y_6_1[i] * self._sin_n_p[1][i]
acc += (real * real + imag * imag)
# Y_6_2
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_2[i] * self._cos_n_p[2][i]
imag += pre_y_6_2[i] * self._sin_n_p[2][i]
acc += (real * real + imag * imag)
# Y_6_3
        real = imag = 0.0
for i in nnn_range:
real -= pre_y_6_3[i] * self._cos_n_p[3][i]
imag -= pre_y_6_3[i] * self._sin_n_p[3][i]
acc += (real * real + imag * imag)
# Y_6_4
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_4[i] * self._cos_n_p[4][i]
imag += pre_y_6_4[i] * self._sin_n_p[4][i]
acc += (real * real + imag * imag)
# Y_6_5
        real = imag = 0.0
for i in nnn_range:
real -= pre_y_6_5[i] * self._cos_n_p[5][i]
imag -= pre_y_6_5[i] * self._sin_n_p[5][i]
acc += (real * real + imag * imag)
# Y_6_6
        real = imag = 0.0
for i in nnn_range:
real += pre_y_6_6[i] * self._cos_n_p[6][i]
imag += pre_y_6_6[i] * self._sin_n_p[6][i]
acc += (real * real + imag * imag)
q6 = math.sqrt(4.0 * pi * acc / (13.0 * float(nnn * nnn)))
return q6
def get_type(self, index):
"""
Return type of order-parameter at the index provided and
represented by a short string.
Args:
index (int):
index of order-parameter for which type is to be returned
"""
if index < 0 or index >= len(self._types):
raise ValueError("Index for getting order-parameter type"
" out-of-bounds!")
return self._types[index]
def get_parameters(self, index):
"""
Returns list of floats that represents
the parameters associated with calculation of the order
parameter that was defined at the index provided.
        Attention: the parameters need not equal those originally input,
        because they may have been transformed for efficiency reasons.
Args:
index (int):
index of order-parameter for which associated parameters
are to be returned
"""
if index < 0 or index >= len(self._types):
raise ValueError("Index for getting parameters associated with"
" order-parameter calculation out-of-bounds!")
return self._paras[index]
def get_order_parameters(self, structure, n, indeces_neighs=None, \
tol=0.0, target_spec=None):
"""
Compute all order parameters of site n.
Args:
structure (Structure):
input structure.
n (int):
index of site in input structure, for which OPs are to be
calculated. Note that we do not use the sites iterator
here, but directly access sites via struct[index].
indeces_neighs ([int]):
                list of indices of those neighbors in Structure object
structure that are to be considered for OP computation.
This optional argument overwrites the way neighbors are
to be determined as defined in the constructor (i.e.,
Voronoi coordination finder via negative cutoff radius
vs constant cutoff radius if cutoff was positive).
We do not use information about the underlying
                structure lattice if the neighbor indices are explicitly
provided. This has two important consequences. First,
the input Structure object can, in fact, be a
simple list of Site objects. Second, no nearest images
of neighbors are determined when providing an index list.
Note furthermore that this neighbor
determination type ignores the optional target_spec
argument.
tol (float):
threshold of weight (= solid angle / maximal solid angle)
to determine if a particular pair is
considered neighbors; this is relevant only in the case
when Voronoi polyhedra are used to determine coordination
target_spec (Specie):
target specie to be considered when calculating the order
parameters of site n; None includes all species of input
structure.
Returns:
list of floats representing order parameters. Should it not be
possible to compute a given OP for a conceptual reason, the
corresponding entry is None instead of a float. For Steinhardt
et al.'s bond orientational OPs and the other geometric OPs
("tet", "oct", "bcc"), this can happen if there is a single
neighbor around site n in the structure because that, obviously,
does not permit calculation of angles between multiple
neighbors.
"""
# Do error-checking and initialization.
if n < 0:
raise ValueError("Site index smaller zero!")
if n >= len(structure):
raise ValueError("Site index beyond maximum!")
if indeces_neighs is not None:
for index in indeces_neighs:
if index >= len(structure):
raise ValueError("Neighbor site index beyond maximum!")
if tol < 0.0:
raise ValueError("Negative tolerance for weighted solid angle!")
left_of_unity = 1.0 - 1.0e-12
# The following threshold has to be adapted to non-Angstrom units.
very_small = 1.0e-12
# Find central site and its neighbors.
# Note that we adopt the same way of accessing sites here as in
# VoronoiCoordFinder; that is, not via the sites iterator.
centsite = structure[n]
if indeces_neighs is not None:
neighsites = [structure[index] for index in indeces_neighs]
elif self._voroneigh:
vorocf = VoronoiCoordFinder(structure)
neighsites = vorocf.get_coordinated_sites(n, tol, target_spec)
else:
# Structure.get_sites_in_sphere --> also other periodic images
neighsitestmp = [i[0] for i in structure.get_sites_in_sphere(
centsite.coords, self._cutoff)]
neighsites = []
if centsite not in neighsitestmp:
raise ValueError("Could not find center site!")
else:
neighsitestmp.remove(centsite)
if target_spec is None:
neighsites = list(neighsitestmp)
else:
neighsites[:] = [site for site in neighsitestmp \
if site.specie.symbol == target_spec]
nneigh = len(neighsites)
self._last_nneigh = nneigh
# Prepare angle calculations, if applicable.
rij = []
rjk = []
rijnorm = []
rjknorm = []
dist = []
distjk_unique = []
distjk = []
centvec = centsite.coords
if self._computerijs:
for j, neigh in enumerate(neighsites):
rij.append((neigh.coords - centvec))
dist.append(np.linalg.norm(rij[j]))
rijnorm.append((rij[j] / dist[j]))
if self._computerjks:
for j, neigh in enumerate(neighsites):
rjk.append([])
rjknorm.append([])
distjk.append([])
kk = 0
for k in range(len(neighsites)):
if j != k:
rjk[j].append(neighsites[k].coords - neigh.coords)
distjk[j].append(np.linalg.norm(rjk[j][kk]))
if k > j:
distjk_unique.append(distjk[j][kk])
rjknorm[j].append(rjk[j][kk] / distjk[j][kk])
kk = kk + 1
# Initialize OP list and, then, calculate OPs.
ops = [0.0 for t in self._types]
# First, coordination number-based OPs.
for i, t in enumerate(self._types):
if t == "cn":
ops[i] = nneigh / self._paras[i][0]
# Then, bond orientational OPs based on spherical harmonics
# according to Steinhardt et al., Phys. Rev. B, 28, 784-805, 1983.
if self._boops:
thetas = []
phis = []
for j, vec in enumerate(rijnorm):
# z is North pole --> theta between vec and (0, 0, 1)^T.
# Because vec is normalized, dot product is simply vec[2].
thetas.append(math.acos(max(-1.0, min(vec[2], 1.0))))
tmpphi = 0.0
# Compute phi only if it is not (almost) perfectly
# aligned with z-axis.
if vec[2] < left_of_unity and vec[2] > - (left_of_unity):
# x is prime meridian --> phi between projection of vec
# into x-y plane and (1, 0, 0)^T
tmpphi = math.acos(max(
-1.0,
min(vec[0] / (math.sqrt(
vec[0] * vec[0] + vec[1] * vec[1])),
1.0)))
if vec[1] < 0.0:
tmpphi = -tmpphi
phis.append(tmpphi)
# Note that None flags that we have too few neighbors
# for calculating BOOPS.
for i, t in enumerate(self._types):
if t == "q2":
ops[i] = self.get_q2(thetas, phis) if len(
thetas) > 0 else None
elif t == "q4":
ops[i] = self.get_q4(thetas, phis) if len(
thetas) > 0 else None
elif t == "q6":
ops[i] = self.get_q6(thetas, phis) if len(
thetas) > 0 else None
# Then, deal with the Peters-style OPs that are tailor-made
# to recognize common structural motifs
# (Peters, J. Chem. Phys., 131, 244103, 2009;
# Zimmermann et al., J. Am. Chem. Soc., under revision, 2015).
if self._geomops:
gaussthetak = [0.0 for t in self._types] # not used by all OPs
qsptheta = [[] for t in self._types] # not used by all OPs
ipi = 1.0 / pi
piover2 = pi / 2.0
tetangoverpi = math.acos(-1.0 / 3.0) * ipi
itetangminuspihalfoverpi = 1.0 / (tetangoverpi - 0.5)
for j in range(nneigh): # Neighbor j is put to the North pole.
zaxis = rijnorm[j]
for i, t in enumerate(self._types):
qsptheta[i].append(0.0)
for k in range(nneigh): # From neighbor k, we construct
if j != k: # the prime meridian.
tmp = max(
-1.0, min(np.inner(zaxis, rijnorm[k]), 1.0))
thetak = math.acos(tmp)
xaxistmp = gramschmidt(rijnorm[k], zaxis)
if np.linalg.norm(xaxistmp) < very_small:
flag_xaxis = True
else:
xaxis = xaxistmp / np.linalg.norm(xaxistmp)
flag_xaxis = False
# Contributions of j-i-k angles, where i represents the central atom
# and j and k two of the neighbors.
for i, t in enumerate(self._types):
if t == "lin":
tmp = self._paras[i][0] * (thetak * ipi - 1.0)
ops[i] += exp(-0.5 * tmp * tmp)
elif t == "bent":
tmp = self._paras[i][1] * (
thetak * ipi - self._paras[i][0])
ops[i] += exp(-0.5 * tmp * tmp)
elif t == "tet":
tmp = self._paras[i][0] * (
thetak * ipi - tetangoverpi)
gaussthetak[i] = math.exp(-0.5 * tmp * tmp)
elif t == "oct":
if thetak >= self._paras[i][0]:
# k is south pole to j
tmp = self._paras[i][1] * (
thetak * ipi - 1.0)
ops[i] += 3.0 * math.exp(-0.5 * tmp * tmp)
elif t == "bcc" and j < k:
if thetak >= self._paras[i][0]:
# k is south pole to j
tmp = self._paras[i][1] * (
thetak * ipi - 1.0)
ops[i] += 6.0 * math.exp(-0.5 * tmp * tmp)
elif t == "sq_pyr":
tmp = self._paras[i][0] * (thetak * ipi - 0.5)
qsptheta[i][j] = qsptheta[i][j] + exp(-0.5 * tmp * tmp)
elif t == "tri_bipyr":
if thetak >= self._paras[i][0]:
tmp = self._paras[i][1] * (
thetak * ipi - 1.0)
qsptheta[i][j] = 2.0 * math.exp(-0.5 * tmp * tmp)
for m in range(nneigh):
if (m != j) and (m != k) and (not flag_xaxis):
tmp = max(
-1.0, min(np.inner(zaxis, rijnorm[m]), 1.0))
thetam = math.acos(tmp)
xtwoaxistmp = gramschmidt(rijnorm[m], zaxis)
l = np.linalg.norm(xtwoaxistmp)
if l < very_small:
flag_xtwoaxis = True
else:
xtwoaxis = xtwoaxistmp / l
phi = math.acos(max(
-1.0,
min(np.inner(xtwoaxis, xaxis), 1.0)))
flag_xtwoaxis = False
# Contributions of j-i-m angle and
# angles between plane j-i-k and i-m vector.
if not flag_xaxis and not flag_xtwoaxis:
for i, t in enumerate(self._types):
if t == "tet":
tmp = self._paras[i][0] * (
thetam * ipi - tetangoverpi)
ops[i] += gaussthetak[i] * math.exp(
-0.5 * tmp * tmp) * math.cos(
3.0 * phi)
elif t == "oct":
if thetak < self._paras[i][0] and \
thetam < \
self._paras[i][0]:
tmp = math.cos(2.0 * phi)
tmp2 = self._paras[i][2] * (
thetam * ipi - 0.5)
ops[i] += tmp * tmp * \
self._paras[i][4] * (
math.exp(
-0.5 * tmp2 * tmp2) - \
self._paras[i][3])
elif t == "bcc" and j < k:
if thetak < self._paras[i][0]:
if thetak > piover2:
fac = 1.0
else:
fac = -1.0
tmp = (thetam - piover2) / (
19.47 * pi / 180.0)
ops[i] += fac * math.cos(
3.0 * phi) * \
1.6 * tmp * \
math.exp(
-0.5 * tmp * tmp)
elif t == "tri_bipyr":
if thetak < self._paras[i][0] and \
thetam < self._paras[i][0]:
tmp = math.cos(1.5 * phi)
tmp2 = self._paras[i][2] * (
thetam * ipi - 0.5)
qsptheta[i][j] += \
tmp * tmp * math.exp( \
-0.5 * tmp2 * tmp2)
# Normalize Peters-style OPs.
for i, t in enumerate(self._types):
if t == "lin":
ops[i] = ops[i] / float(nneigh * (
nneigh - 1)) if nneigh > 1 else None
elif t == "bent":
ops[i] = ops[i] / float(nneigh * (
nneigh - 1)) if nneigh > 1 else None
elif t == "tet":
ops[i] = ops[i] / float(nneigh * (nneigh - 1) * (
nneigh - 2)) if nneigh > 2 else None
elif t == "oct":
ops[i] = ops[i] / float(nneigh * (3 + (nneigh - 2) * (
nneigh - 3))) if nneigh > 3 else None
elif t == "bcc":
ops[i] = ops[i] / float(0.5 * float(
nneigh * (6 + (nneigh - 2) * (nneigh - 3)))) \
if nneigh > 3 else None
elif t == "sq_pyr":
if nneigh > 1:
dmean = np.mean(dist)
acc = 0.0
for d in dist:
tmp = self._paras[i][1] * (d - dmean)
acc = acc + exp(-0.5 * tmp * tmp)
ops[i] = acc * max(qsptheta[i]) / float(
nneigh * (nneigh - 1))
else:
ops[i] = None
elif t == "tri_bipyr":
ops[i] = max(qsptheta[i]) / float(
2 + (nneigh - 2) * (nneigh - 3)) if nneigh > 3 \
else None
# Then, deal with the new-style OPs that require vectors between
# neighbors.
if self._geomops2:
# Compute all (unique) angles and sort the resulting list.
aij = []
for ir, r in enumerate(rijnorm):
for j in range(ir+1, len(rijnorm)):
aij.append(math.acos(max(-1.0, min(np.inner(
r, rijnorm[j]), 1.0))))
aijs = sorted(aij)
# Compute height, side and diagonal length estimates.
neighscent = np.array([0.0, 0.0, 0.0])
for j, neigh in enumerate(neighsites):
neighscent = neighscent + neigh.coords
if nneigh > 0:
neighscent = (neighscent / float(nneigh))
h = np.linalg.norm(neighscent - centvec)
b = min(distjk_unique) if len(distjk_unique) > 0 else 0
dhalf = max(distjk_unique) / 2.0 if len(distjk_unique) > 0 else 0
for i, t in enumerate(self._types):
if t == "reg_tri" or t == "sq":
if nneigh < 3:
ops[i] = None
else:
ops[i] = 1.0
if t == "reg_tri":
a = 2.0 * asin(b / (2.0 * sqrt(h*h + (b / (
2.0 * cos(3.0 * pi / 18.0)))**2.0)))
nmax = 3
else:
a = 2.0 * asin(b / (2.0 * sqrt(h*h + dhalf*dhalf)))
nmax = 4
for j in range(min([nneigh,nmax])):
ops[i] = ops[i] * exp(-0.5 * ((
aijs[j] - a) * self._paras[i][0])**2)
return ops
| mit |
kjung/scikit-learn | examples/neural_networks/plot_mlp_alpha.py | 19 | 4088 | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter for regularization term, aka penalty term, that combats
overfitting by constraining the size of the weights. Increasing alpha may fix
high variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision boundary plot that appears with lesser curvatures.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MLPClassifier(alpha=i, random_state=1))
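# Illustrative helper (added; not part of the original example): alpha scales
# an L2 penalty on the network weights, so larger alpha values should shrink
# the fitted weight matrices. Comparing the norms returned below across the
# alphas above makes that effect visible after fitting.
def weight_norms(clf):
    """Return the Frobenius norm of each fitted weight matrix in ``clf.coefs_``."""
    return [np.linalg.norm(coef) for coef in clf.coefs_]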
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
Scapogo/zipline | zipline/pipeline/factors/technical.py | 2 | 24137 | """
Technical Analysis Factors
--------------------------
"""
from __future__ import division
from numbers import Number
from numpy import (
abs,
arange,
average,
clip,
diff,
dstack,
exp,
fmax,
full,
inf,
isnan,
log,
NINF,
sqrt,
sum as np_sum,
)
from numexpr import evaluate
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded, expect_types
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nansum,
nanmin,
)
from zipline.utils.numpy_utils import (
float64_dtype,
ignore_nanwarnings,
rolling_window,
)
from .factor import CustomFactor
class Returns(CustomFactor):
"""
Calculates the percent change in close price over the given window_length.
**Default Inputs**: [USEquityPricing.close]
"""
inputs = [USEquityPricing.close]
window_safe = True
def _validate(self):
super(Returns, self)._validate()
if self.window_length < 2:
raise ValueError(
"'Returns' expected a window length of at least 2, but was "
"given {window_length}. For daily returns, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, close):
out[:] = (close[-1] - close[0]) / close[0]
class RSI(CustomFactor, SingleInputMixin):
"""
Relative Strength Index
**Default Inputs**: [USEquityPricing.close]
**Default Window Length**: 15
"""
window_length = 15
inputs = (USEquityPricing.close,)
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
class SimpleMovingAverage(CustomFactor, SingleInputMixin):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
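    # Illustrative usage sketch (added; not part of the original module):
    # factors like this one are typically attached to a Pipeline, e.g.
    #     from zipline.pipeline import Pipeline
    #     sma10 = SimpleMovingAverage(inputs=[USEquityPricing.close],
    #                                 window_length=10)
    #     pipe = Pipeline(columns={'sma10': sma10})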
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = (USEquityPricing.close, USEquityPricing.volume)
class MaxDrawdown(CustomFactor, SingleInputMixin):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[:end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
class AverageDollarVolume(CustomFactor):
"""
Average Daily Dollar Volume
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = [USEquityPricing.close, USEquityPricing.volume]
def compute(self, today, assets, out, close, volume):
out[:] = nansum(close * volume, axis=0) / len(close)
def exponential_weights(length, decay_rate):
"""
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
        [decay_rate ** (length + 1), ..., decay_rate ** 3, decay_rate ** 2]
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64]
"""
return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1)
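# Worked example (added for illustration; not in the original module):
# exponential_weights(3, 0.5) evaluates 0.5 ** array([4, 3, 2]) and returns
# array([0.0625, 0.125, 0.25]) -- oldest observation first, newest last.
# Only the relative magnitudes matter, because np.average re-normalizes the
# weights when computing the weighted statistics below.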
class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor):
"""
Base class for factors implementing exponential-weighted operations.
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list or tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Methods
-------
weights
from_span
from_halflife
from_center_of_mass
"""
params = ('decay_rate',)
@classmethod
@expect_types(span=Number)
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[USEquityPricing.close],
window_length=30,
span=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
@expect_types(halflife=Number)
def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[USEquityPricing.close],
window_length=30,
halflife=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
Forwards `decay_rate` as `1 - (1 / 1 + center_of_mass)`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 15.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[USEquityPricing.close],
window_length=30,
center_of_mass=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
)
class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Average
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMA``.
See Also
--------
:func:`pandas.ewma`
"""
def compute(self, today, assets, out, data, decay_rate):
out[:] = average(
data,
axis=0,
weights=exponential_weights(len(data), decay_rate),
)
class LinearWeightedMovingAverage(CustomFactor, SingleInputMixin):
"""
Weighted Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
ndays = data.shape[0]
# Initialize weights array
weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1)
# Compute normalizer
normalizer = (ndays * (ndays + 1)) / 2
# Weight the data
weighted_data = data * weights
# Compute weighted averages
out[:] = nansum(weighted_data, axis=0) / normalizer
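    # Worked example (added for illustration): for a 3-day window with closes
    # [10, 11, 13], the weights are [1, 2, 3] and the normalizer is
    # 3 * 4 / 2 = 6, giving (10 + 22 + 39) / 6 = 11.83..., i.e. the most
    # recent observation counts three times as much as the oldest one.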
class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Standard Deviation
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMSTD``.
See Also
--------
:func:`pandas.ewmstd`
"""
def compute(self, today, assets, out, data, decay_rate):
weights = exponential_weights(len(data), decay_rate)
mean = average(data, axis=0, weights=weights)
variance = average((data - mean) ** 2, axis=0, weights=weights)
squared_weight_sum = (np_sum(weights) ** 2)
bias_correction = (
squared_weight_sum / (squared_weight_sum - np_sum(weights ** 2))
)
out[:] = sqrt(variance * bias_correction)
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
"""
params = ('k',)
inputs = (USEquityPricing.close,)
outputs = 'lower', 'middle', 'upper'
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator # noqa
    **Default Inputs:** USEquityPricing.low, USEquityPricing.high
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
"""
inputs = (USEquityPricing.low, USEquityPricing.high)
outputs = ('down', 'up')
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
'(100 * high_date_index) / (wl - 1)',
local_dict={
'high_date_index': high_date_index,
'wl': wl,
},
out=out.up,
)
evaluate(
'(100 * low_date_index) / (wl - 1)',
local_dict={
'low_date_index': low_date_index,
'wl': wl,
},
out=out.down,
)
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
This stochastic is considered volatile, and varies a lot when used in
market analysis. It is recommended to use the slow stochastic oscillator
or a moving average of the %K [%D].
**Default Inputs:** :data: `zipline.pipeline.data.USEquityPricing.close`
:data: `zipline.pipeline.data.USEquityPricing.low`
:data: `zipline.pipeline.data.USEquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (USEquityPricing.close, USEquityPricing.low, USEquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
'((tc - ll) / (hh - ll)) * 100',
local_dict={
'tc': today_closes,
'll': lowest_lows,
'hh': highest_highs,
},
global_dict={},
out=out,
)
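    # Worked example (added for illustration): with a 14-day low of 40, a
    # 14-day high of 60 and today's close at 55, the oscillator is
    # ((55 - 40) / (60 - 40)) * 100 = 75, i.e. the close sits in the upper
    # quarter of the recent trading range.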
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud # noqa
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
        The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
        The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
"""
params = {
'tenkan_sen_length': 9,
'kijun_sen_length': 26,
'chikou_span_length': 26,
}
inputs = (USEquityPricing.high, USEquityPricing.low, USEquityPricing.close)
outputs = (
'tenkan_sen',
'kijun_sen',
'senkou_span_a',
'senkou_span_b',
'chikou_span',
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
'%s must be <= the window_length: %s > %s' % (
k, v, self.window_length,
),
)
def compute(self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) +
low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) +
low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
out.chikou_span = close[chikou_span_length]
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
    prevPrice - the price n days ago, where n equals the window length
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate('((tc - pc) / pc) * 100',
local_dict={
'tc': today_close,
'pc': prev_close
},
global_dict={},
out=out,
)
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
USEquityPricing.high,
USEquityPricing.low,
USEquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack((
high_to_low,
high_to_prev_close,
low_to_prev_close,
)),
2
)
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970's. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
"""
inputs = (USEquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ('fast_period', 'slow_period', 'signal_period')
@expect_bounded(
__funcname='MACDSignal',
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls,
fast_period=12,
slow_period=26,
signal_period=9,
*args,
**kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args, **kwargs
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(
data,
axis=1,
weights=exponential_weights(length, decay_rate)
)
def compute(self, today, assets, out, close, fast_period, slow_period,
signal_period):
slow_EWMA = self._ewma(
rolling_window(close, slow_period),
slow_period
)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:],
fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
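    # Clarifying note (added): with the default periods (12, 26, 9) this
    # factor requests window_length = 26 + 9 - 1 = 34 rows of closes, builds
    # a fast and a slow EWMA for each of the last 9 days, takes their
    # difference (the MACD line), and reports the 9-period EWMA of that
    # difference (the signal line).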
class AnnualizedVolatility(CustomFactor):
"""
Volatility. The degree of variation of a series over time as measured by
the standard deviation of daily returns.
https://en.wikipedia.org/wiki/Volatility_(finance)
**Default Inputs:** :data:`zipline.pipeline.factors.Returns(window_length=2)` # noqa
Parameters
----------
annualization_factor : float, optional
The number of time units per year. Defaults is 252, the number of NYSE
trading days in a normal year.
"""
inputs = [Returns(window_length=2)]
params = {'annualization_factor': 252.0}
window_length = 252
def compute(self, today, assets, out, returns, annualization_factor):
out[:] = nanstd(returns, axis=0) * (annualization_factor ** .5)
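    # Worked example (added for illustration): a 1% daily return standard
    # deviation annualizes to roughly 0.01 * sqrt(252) ~= 0.159, i.e. about
    # 15.9% annualized volatility under the default annualization_factor.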
# Convenience aliases.
EWMA = ExponentialWeightedMovingAverage
EWMSTD = ExponentialWeightedMovingStdDev
MACDSignal = MovingAverageConvergenceDivergenceSignal
| apache-2.0 |
rishita/mxnet | example/dec/dec.py | 15 | 7064 | # pylint: skip-file
from __future__ import print_function
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], int(Y[i])] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
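# Illustrative example (added; not part of the original script): cluster_acc
# matches predicted cluster ids to true labels with the Hungarian algorithm,
# so a pure relabelling scores perfectly, e.g.
# cluster_acc(np.array([1, 1, 0, 0]), np.array([0, 0, 1, 1]))[0] == 1.0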
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
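    # Clarifying note (added; cf. Xie, Girshick & Farhadi, "Unsupervised Deep
    # Embedding for Clustering Analysis"): DECLoss.forward computes the soft
    # assignment of embedded point z_i to cluster centre mu_j with a
    # Student's t kernel,
    #     q_ij = (1 + ||z_i - mu_j||^2 / alpha) ** (-(alpha + 1) / 2) / (row normalizer),
    # and backward propagates the gradient of KL(P || Q) with respect to both
    # the embeddings z and the cluster centres mu.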
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0] * 9 // 10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
if y is not None:
print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad, None,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| apache-2.0 |
Akshay-lodha/MTP | src/models/visualize_model.py | 2 | 3005 | """
A script to visualize layers and filters in the conv net model
"""
import tflearn
from cnn_model import CNNModel
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.ndimage import imread, zoom
def create_mosaic(image, nrows, ncols):
"""
Tiles all the layers in nrows x ncols
Args:
------
image = 3d numpy array of M * N * number of filters dimensions
nrows = integer representing number of images in a row
    ncols = integer representing number of images in a column
returns formatted image
"""
M = image.shape[0]
N = image.shape[1]
npad = ((1,1), (1,1), (0,0))
image = np.pad(image, pad_width = npad, mode = 'constant',\
constant_values = 0)
M += 2
N += 2
image = image.reshape(M, N, nrows, ncols)
image = np.transpose(image, (2,0,3,1))
image = image.reshape(M*nrows, N*ncols)
return image
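# Illustrative example (added; not part of the original script): 16 feature
# maps of size 8x8, passed as an 8x8x16 array with nrows=4 and ncols=4, are
# first zero-padded to 10x10 each and then tiled into a single 40x40 image.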
def get_layer_output(layer, model, inp):
"""
Returns model layer output
Args
----
layer: cnn layer
model: cnn model
inp: input image
"""
m2 = tflearn.DNN(layer, session = model.session)
yhat = m2.predict(inp.reshape(-1, inp.shape[0], inp.shape[1], 1))
yhat_1 = np.array(yhat[0])
return m2, yhat_1
def plot_layers(image, idx, pltfilename, size = 12, cmapx = 'magma'):
"""
plot filter output in layers
Args
----
image: layer output of form M x N x nfilt
idx: layer number
pltfilename = a string representing filename
"""
nfilt = image.shape[-1]
mosaic = create_mosaic(image, nfilt/4, 4)
plt.figure(figsize = (size, size))
plt.imshow(mosaic, cmap = cmapx)
plt.axis('off')
plt.savefig(pltfilename + str(idx)+'.png', bbox_inches='tight')
#plt.show()
def get_weights(m2, layer):
"""
get a layer's weights
Args:
------
m2: model input
layer = layer in question
Returns:
weights
"""
weights = m2.get_weights(layer.W)
print weights.shape
weights =\
weights.reshape(weights.shape[0], weights.shape[1], weights.shape[-1])
return weights
def plot_single_output(image, size = 6):
plt.figure(figsize = (size, size))
    plt.imshow(image, cmap = 'magma')
plt.axis('off')
plt.savefig('filterout' + '.png', bbox_inches='tight')
def main():
### Plot layer
filename = '../data/test/image_21351.jpg'
inp = imread(filename).astype('float32')
convnet = CNNModel()
conv_layer_1, conv_layer_2, conv_layer_3, network =\
convnet.define_network(inp.reshape(-1, inp.shape[0], inp.shape[1], 1), 'visual')
model = tflearn.DNN(network, tensorboard_verbose=0,\
checkpoint_path='nodule3-classifier.tfl.ckpt')
model.load("nodule3-classifier.tfl")
print model.predict(inp.reshape(-1, 50, 50, 1))
layers_to_be_plotted = [conv_layer_1, conv_layer_2, conv_layer_3]
#plot_layers(conv_layer_1, model, inp)
for idx, layer in enumerate(layers_to_be_plotted):
m2, yhat = get_layer_output(layer, model, inp)
plot_layers(yhat, idx, 'conv_layer_')
weights = get_weights(m2, conv_layer_1)
plot_layers(weights, 0, 'weight_conv_layer_', 6, 'gray')
if __name__ == "__main__":
main()
| mit |
jstoxrocky/statsmodels | statsmodels/tsa/statespace/tools.py | 19 | 12762 | """
Statespace Tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from statsmodels.tools.data import _is_using_pandas
from . import _statespace
try:
from scipy.linalg.blas import find_best_blas_type
except ImportError: # pragma: no cover
# Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
def find_best_blas_type(arrays):
dtype, index = max(
[(ar.dtype, i) for i, ar in enumerate(arrays)])
prefix = _type_conv.get(dtype.char, 'd')
return prefix, dtype, None
prefix_dtype_map = {
's': np.float32, 'd': np.float64, 'c': np.complex64, 'z': np.complex128
}
prefix_statespace_map = {
's': _statespace.sStatespace, 'd': _statespace.dStatespace,
'c': _statespace.cStatespace, 'z': _statespace.zStatespace
}
prefix_kalman_filter_map = {
's': _statespace.sKalmanFilter, 'd': _statespace.dKalmanFilter,
'c': _statespace.cKalmanFilter, 'z': _statespace.zKalmanFilter
}
def companion_matrix(polynomial):
r"""
Create a companion matrix
Parameters
----------
polynomial : array_like, optional.
If an iterable, interpreted as the coefficients of the polynomial from
which to form the companion matrix. Polynomial coefficients are in
order of increasing degree. If an integer, the size of the companion
matrix (the polynomial coefficients are then set to zeros).
Returns
-------
companion_matrix : array
Notes
-----
Returns a matrix of the form
.. math::
\begin{bmatrix}
\phi_1 & 1 & 0 & \cdots & 0 \\
\phi_2 & 0 & 1 & & 0 \\
\vdots & & & \ddots & 0 \\
& & & & 1 \\
\phi_n & 0 & 0 & \cdots & 0 \\
\end{bmatrix}
where some or all of the :math:`\phi_i` may be non-zero (if `polynomial` is
None, then all are equal to zero).
If the coefficients provided are :math:`(c_0, c_1, \dots, c_{n})`,
then the companion matrix is an :math:`n \times n` matrix formed with the
elements in the first column defined as
:math:`\phi_i = -\frac{c_i}{c_0}, i \in 1, \dots, n`.
"""
if isinstance(polynomial, int):
n = polynomial
polynomial = None
else:
n = len(polynomial) - 1
polynomial = np.asanyarray(polynomial)
matrix = np.zeros((n, n))
idx = np.diag_indices(n - 1)
idx = (idx[0], idx[1] + 1)
matrix[idx] = 1
if polynomial is not None and n > 0:
matrix[:, 0] = -polynomial[1:] / polynomial[0]
return matrix
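# Two quick checks (illustrative sketch, not part of the original module):
#
#     companion_matrix(3)
#     # -> 3x3 zeros with ones on the superdiagonal
#     companion_matrix([1, -0.5])
#     # -> array([[ 0.5]]), since phi_1 = -c_1 / c_0 = 0.5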
def diff(series, k_diff=1, k_seasonal_diff=None, k_seasons=1):
r"""
Difference a series simply and/or seasonally along the zero-th axis.
Given a series (denoted :math:`y_t`), performs the differencing operation
.. math::
\Delta^d \Delta_s^D y_t
    where :math:`d =` `k_diff`, :math:`s =` `k_seasons`,
    :math:`D =` `k\_seasonal\_diff`, and :math:`\Delta` is the difference
operator.
Parameters
----------
series : array_like
The series to be differenced.
    k_diff : int, optional
The number of simple differences to perform. Default is 1.
    k_seasonal_diff : int or None, optional
The number of seasonal differences to perform. Default is no seasonal
differencing.
k_seasons : int, optional
The seasonal lag. Default is 1. Unused if there is no seasonal
differencing.
Returns
-------
differenced : array
The differenced array.
"""
pandas = _is_using_pandas(series, None)
differenced = np.asanyarray(series) if not pandas else series
# Seasonal differencing
if k_seasonal_diff is not None:
while k_seasonal_diff > 0:
if not pandas:
differenced = (
differenced[k_seasons:] - differenced[:-k_seasons]
)
else:
differenced = differenced.diff(k_seasons)[k_seasons:]
k_seasonal_diff -= 1
# Simple differencing
if not pandas:
differenced = np.diff(differenced, k_diff, axis=0)
else:
while k_diff > 0:
differenced = differenced.diff()[1:]
k_diff -= 1
return differenced
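# Worked example (sketch): first differences of a linear trend are constant,
# and a single seasonal difference with k_seasons=4 subtracts the value four
# periods earlier.
#
#     diff(np.arange(10.), k_diff=1)
#     # -> array of nine 1.0 values
#     diff(np.arange(8.), k_diff=0, k_seasonal_diff=1, k_seasons=4)
#     # -> array([ 4.,  4.,  4.,  4.])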
def is_invertible(polynomial, threshold=1.):
r"""
Determine if a polynomial is invertible.
Requires all roots of the polynomial lie inside the unit circle.
Parameters
----------
polynomial : array_like
Coefficients of a polynomial, in order of increasing degree.
For example, `polynomial=[1, -0.5]` corresponds to the polynomial
:math:`1 - 0.5x` which has root :math:`2`.
threshold : number
Allowed threshold for `is_invertible` to return True. Default is 1.
Notes
-----
If the coefficients provided are :math:`(c_0, c_1, \dots, c_n)`, then
the corresponding polynomial is :math:`c_0 + c_1 L + \dots + c_n L^n`.
There are three equivalent methods of determining if the polynomial
represented by the coefficients is invertible:
The first method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
& = constant (1 - \lambda_1 L)
(1 - \lambda_2 L) \dots (1 - \lambda_n L)
In order for :math:`C(L)` to be invertible, it must be that each factor
:math:`(1 - \lambda_i L)` is invertible; the condition is then that
:math:`|\lambda_i| < 1`, where :math:`\lambda_i` is a root of the
polynomial.
The second method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
        & = constant (L - \zeta_1) (L - \zeta_2) \dots (L - \zeta_n)
The condition is now :math:`|\zeta_i| > 1`, where :math:`\zeta_i` is a root
of the polynomial with reversed coefficients and
:math:`\lambda_i = \frac{1}{\zeta_i}`.
Finally, a companion matrix can be formed using the coefficients of the
polynomial. Then the eigenvalues of that matrix give the roots of the
polynomial. This last method is the one actually used.
See Also
--------
companion_matrix
"""
# First method:
# np.all(np.abs(np.roots(np.r_[1, params])) < 1)
# Second method:
# np.all(np.abs(np.roots(np.r_[1, params][::-1])) > 1)
# Final method:
eigvals = np.linalg.eigvals(companion_matrix(polynomial))
return np.all(np.abs(eigvals) < threshold)
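# Example values (sketch): 1 - 0.5 L has its root at 2, i.e. lambda = 0.5, so
# it is invertible; 1 - 2 L has its root at 0.5, i.e. lambda = 2, so it is not.
#
#     is_invertible([1, -0.5])   # -> True
#     is_invertible([1, -2.])    # -> False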
def constrain_stationary_univariate(unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
Returns
-------
constrained : array
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
References
----------
Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = unconstrained.shape[0]
y = np.zeros((n, n), dtype=unconstrained.dtype)
r = unconstrained/((1 + unconstrained**2)**0.5)
for k in range(n):
for i in range(k):
y[k, i] = y[k - 1, i] + r[k] * y[k - 1, k - i - 1]
y[k, k] = r[k]
return -y[n - 1, :]
def unconstrain_stationary_univariate(constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
Returns
-------
unconstrained : array
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
References
----------
Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = constrained.shape[0]
y = np.zeros((n, n), dtype=constrained.dtype)
y[n-1:] = -constrained
for k in range(n-1, 0, -1):
for i in range(k):
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
r = y.diagonal()
x = r / ((1 - r**2)**0.5)
return x
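# The two transforms above are intended to be inverses of each other, so a
# round trip should recover the original parameters (the values below are
# arbitrary; this is a sketch, not a statement about every edge case):
#
#     x = np.array([1.5, -0.3])
#     y = constrain_stationary_univariate(x)
#     np.allclose(unconstrain_stationary_univariate(y), x)   # -> True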
def validate_matrix_shape(name, shape, nrows, ncols, nobs):
"""
Validate the shape of a possibly time-varying matrix, or raise an exception
Parameters
----------
name : str
The name of the matrix being validated (used in exception messages)
shape : array_like
The shape of the matrix to be validated. May be of size 2 or (if
the matrix is time-varying) 3.
nrows : int
The expected number of rows.
ncols : int
The expected number of columns.
nobs : int
The number of observations (used to validate the last dimension of a
time-varying matrix)
Raises
------
ValueError
If the matrix is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [2, 3]:
raise ValueError('Invalid value for %s matrix. Requires a'
' 2- or 3-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the matrix
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
if not shape[1] == ncols:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' columns, got %d' % (name, ncols, shape[1]))
# If we don't yet know `nobs`, don't allow time-varying arrays
if nobs is None and not (ndim == 2 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s matrix: time-varying'
' matrices cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
if ndim == 3 and nobs is not None and not shape[-1] in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' matrix. Requires shape (*,*,%d), got %s' %
(name, nobs, str(shape)))
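# Illustrative calls (sketch): a static 2x3 "design" matrix passes silently,
# while a time-varying version whose last dimension matches neither 1 nor
# `nobs` raises.
#
#     validate_matrix_shape('design', (2, 3), 2, 3, nobs=None)    # ok
#     validate_matrix_shape('design', (2, 3, 10), 2, 3, nobs=5)   # ValueError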
def validate_vector_shape(name, shape, nrows, nobs):
"""
Validate the shape of a possibly time-varying vector, or raise an exception
Parameters
----------
name : str
The name of the vector being validated (used in exception messages)
shape : array_like
The shape of the vector to be validated. May be of size 1 or (if
the vector is time-varying) 2.
nrows : int
The expected number of rows (elements of the vector).
nobs : int
The number of observations (used to validate the last dimension of a
time-varying vector)
Raises
------
ValueError
If the vector is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [1, 2]:
raise ValueError('Invalid value for %s vector. Requires a'
' 1- or 2-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the vector
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s vector: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
# If we don't yet know `nobs`, don't allow time-varying arrays
if nobs is None and not (ndim == 1 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s vector: time-varying'
' vectors cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
if ndim == 2 and not shape[1] in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' vector. Requires shape (*,%d), got %s' %
(name, nobs, str(shape)))
| bsd-3-clause |
kashif/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
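# Worked example of the line above (hypothetical labels, not this dataset's):
# labels = [0, 0, 1, 2, -1, 1] gives set {-1, 0, 1, 2}; discounting the noise
# label -1 leaves 3 estimated clusters.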
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
christabor/MoAL | MOAL/algorithms/time_series/dynamic_timewarping.py | 1 | 1635 | # -*- coding: utf-8 -*-
__author__ = """Chris Tabor ([email protected])"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
import mlpy
import matplotlib.pyplot as plot
import matplotlib.cm as cm
from random import randrange as rr
DEBUG = True if __name__ == '__main__' else False
def random_timesequence(start, end, steps=3):
seq = []
for n in range(start, end):
# Randomize the number of sub-steps,
# but maintain the bounds and monotonicity
# (e.g. 0, 0, 1, 1, 1, 2, 3, 3, 3)
for i in range(rr(0, steps)):
seq.append(n)
return seq
if DEBUG:
with Section('Dynamic Time Warping algorithm - MLPY'):
# Using MLPY:
# First, make sure deps are setup.
# `brew install gsl`
# Download from SF: http://mlpy.sourceforge.net/
# Then install using setup.py:
# `cd MLPY_PATH/setup.py install`
# Now this makes it fun.
x, y = random_timesequence(0, 10), random_timesequence(0, 10)
# Taken from examples: http://mlpy.sourceforge.net/docs/3.5/dtw.html#id3
distance, cost, path = mlpy.dtw_std(x, y, dist_only=False)
fig = plot.figure(1)
axes = fig.add_subplot(111)
plot1 = plot.imshow(
cost.T, origin='lower', cmap=cm.gray, interpolation='nearest')
plot2 = plot.plot(path[0], path[1], 'w')
bound = 0.5
xlim = axes.set_xlim((-bound, cost.shape[0] - bound))
ylim = axes.set_ylim((-bound, cost.shape[1] - bound))
plot.show()
| apache-2.0 |
hugobowne/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 25 | 45729 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
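# Sketch of the behaviour exercised by the tests below: zero distances map to
# an infinite weight instead of raising a ZeroDivisionError.
#
#     _weight_func(np.array([0., 2.]))   # -> array([ inf,  0.25])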
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if (issubclass(cls, neighbors.KNeighborsClassifier) or
            issubclass(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
avmarchenko/exatomic | exatomic/qchem/output.py | 2 | 2088 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Q-Chem Ouput Editor
#######################
Editor classes for simple Q-Chem output files
"""
import six
import numpy as np
import pandas as pd
from exa import TypedMeta
from exa.util.units import Length#, Energy
from .editor import Editor
from exatomic.base import sym2z
from exatomic.core.atom import Atom, Frequency
from exatomic.core.frame import Frame#, compute_frame_from_atom
from exatomic.core.basis import (BasisSet, BasisSetOrder, Overlap)#, deduplicate_basis_sets)
from exatomic.core.orbital import Orbital, MOMatrix, Excitation
#from exatomic.algorithms.basis import lmap, lorder
class QMeta(TypedMeta):
atom = Atom
basis_set = BasisSet
orbital = Orbital
momatrix = MOMatrix
basis_set_order = BasisSetOrder
frame = Frame
excitation = Excitation
frequency = Frequency
overlap = Overlap
multipole = pd.DataFrame
class Output(six.with_metaclass(QMeta, Editor)):
def parse_atom(self):
# Atom flags
_regeom01 = "Standard Nuclear Orientation (Angstroms)"
_regeom02 = "Coordinates (Angstroms)"
# Find Data
found = self.find(_regeom01, keys_only=True)
starts = np.array(found) + 3
stop = starts[0]
while '-------' not in self[stop]: stop += 1
stops = starts + (stop - starts[0])
dfs = []
for i, (start, stop) in enumerate(zip(starts, stops)):
atom = self.pandas_dataframe(start, stop, 5)
atom['frame'] = i
dfs.append(atom)
atom = pd.concat(dfs).reset_index(drop=True)
atom.columns = ['set', 'symbol', 'x', 'y', 'z', 'frame']
atom['set'] -= 1
atom['x'] *= Length['Angstrom', 'au']
atom['y'] *= Length['Angstrom', 'au']
atom['z'] *= Length['Angstrom', 'au']
atom['Z'] = atom['symbol'].map(sym2z)
self.atom = atom
def __init__(self, *args, **kwargs):
super(Output, self).__init__(*args,**kwargs)
| apache-2.0 |
drabastomek/practicalDataAnalysisCookbook | Codes/Chapter04/clustering_kmeans.py | 1 | 1172 | # this is needed to load helper from the parent folder
import sys
sys.path.append('..')
# the rest of the imports
import helper as hlp
import pandas as pd
import sklearn.cluster as cl
import sklearn.metrics as mt
@hlp.timeit
def findClusters_kmeans(data):
'''
Cluster data using k-means
'''
# create the classifier object
kmeans = cl.KMeans(
n_clusters=4,
n_jobs=-1,
verbose=0,
n_init=30
)
# fit the data
return kmeans.fit(data)
# the file name of the dataset
r_filename = '../../Data/Chapter04/bank_contacts.csv'
# read the data
csv_read = pd.read_csv(r_filename)
# select variables
selected = csv_read[['n_duration','n_nr_employed',
'prev_ctc_outcome_success','n_euribor3m',
'n_cons_conf_idx','n_age','month_oct',
'n_cons_price_idx','edu_university_degree','n_pdays',
'dow_mon','job_student','job_technician',
'job_housemaid','edu_basic_6y']]
# cluster the data
cluster = findClusters_kmeans(selected)
# assess the clusters effectiveness
labels = cluster.labels_
centroids = cluster.cluster_centers_
hlp.printClustersSummary(selected, labels, centroids) | gpl-2.0 |
TJXUNwu/FileTest | extractsingleftr.py | 1 | 2183 | #coding=utf-8
import sys,os
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import dirlist
caffe_root = 'D:/CaffeProject/CaffeTest/'
sys.path.insert(0, caffe_root + 'python')
import caffe
caffe.set_mode_cpu()
model_file='D:/CaffeProject/CaffeTest/models/bvlc_reference_caffenet/deploy.prototxt'
weight_file='D:/CaffeProject/CaffeTest/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
mean_file='D:/CaffeProject/CaffeTest/python/caffe/imagenet/ilsvrc_2012_mean.npy'
imgfile='E:/FaceData/LFW/Aaron_Peirsol/Aaron_Peirsol_0004.jpg'
imaglistfile='D:/CaffeProject/CaffeTest/data/ilsvrc12/synset_words.txt'
net = caffe.Net(model_file,weight_file, caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(mean_file).mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# set net to batch size of 50
net.blobs['data'].reshape(50,3,227,227)
net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(imgfile))
out = net.forward()
print("Predicted class is #{}.".format(out['prob'][0].argmax()))
# load labels
#imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
labels = np.loadtxt(imaglistfile, str, delimiter='\t')
# sort top k predictions from softmax output
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
print labels[top_k]
print '\nlayer names and params:'
for k, v in net.blobs.items():
print [(k, v.data.shape)]
fet=net.blobs['fc7'].data[0]
sio.savemat('F:\\TJProgram\\MatlabPro\\FtrVal\\fet.mat', {'fet':fet})
#plt.plot(feat.flat)
#plt.show()
#fet=net.params['fc7'][0].data
#sio.savemat('savefet.mat',{'fet':fet})
print 'Save OK !'
| gpl-3.0 |
yanlend/scikit-learn | sklearn/decomposition/nmf.py | 5 | 39512 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..utils import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
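# Boundary cases (sketch): under Hoyer's measure a one-hot vector is maximally
# sparse and a constant vector is minimally sparse.
#
#     _sparseness(np.array([1., 0., 0., 0.]))   # -> 1.0
#     _sparseness(np.array([1., 1., 1., 1.]))   # -> 0.0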
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
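# Both branches compute ||X - WH||_F; the sparse branch expands it as
# sqrt(||X||^2 + ||WH||^2 - 2<X, WH>) so that X is never densified. A
# consistency sketch with random factors (shapes are arbitrary):
#
#     X = np.random.rand(6, 4)
#     W, H = np.random.rand(6, 2), np.random.rand(2, 4)
#     np.allclose(_safe_compute_error(X, W, H),
#                 _safe_compute_error(sp.csr_matrix(X), W, H))   # -> True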
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
    eps : float
        Truncate all values less than this in the output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
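# Illustrative sketch (not part of the library API): how the initializer above
# can be called directly. `X_demo` and the parameter values are assumptions
# chosen purely for demonstration; any non-negative matrix works.
def _example_initialize_nmf():
    rng = np.random.RandomState(0)
    X_demo = np.abs(rng.randn(6, 4))
    W0, H0 = _initialize_nmf(X_demo, n_components=2, init='nndsvda',
                             random_state=0)
    # W0 has shape (6, 2) and H0 has shape (2, 4); W0.dot(H0) roughly
    # reconstructs X_demo
    return W0, H0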
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(beta)
* np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
    # L2 regularization corresponds to increasing the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
    # L1 regularization corresponds to decreasing each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
# L1 and L2 regularization
l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
if regularization in ('both', 'components'):
alpha = float(alpha)
l1_H = l1_ratio * alpha
l2_H = (1. - l1_ratio) * alpha
if regularization in ('both', 'transformation'):
alpha = float(alpha)
l1_W = l1_ratio * alpha
l2_W = (1. - l1_ratio) * alpha
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, six.integer_types) or n_components <= 0:
raise ValueError("Number of components must be positive;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, numbers.Number) or max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom':
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
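# Illustrative sketch (not part of the scikit-learn API): a minimal,
# self-contained call to the function above on a small random non-negative
# matrix. Shapes and parameter values are assumptions for demonstration only.
def _example_non_negative_factorization():
    rng = np.random.RandomState(42)
    X_demo = np.abs(rng.randn(10, 6))
    W, H, n_iter = non_negative_factorization(
        X_demo, n_components=3, init='random', solver='cd',
        tol=1e-4, max_iter=200, random_state=0)
    # W has shape (10, 3) and H has shape (3, 6); W.dot(H) approximates X_demo
    return W, H, n_iter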
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options::
'random': non-negative random matrices
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'custom': use custom matrices W and H, given in 'fit' method.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
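# Optional aside (a sketch, not part of the original example): with a linear
# kernel the geometric margin width follows directly from the weight vector.
margin = 2.0 / np.linalg.norm(w)
print("margin width: %.3f" % margin)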
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
brainiak/brainiak | tests/funcalign/test_srm.py | 1 | 11513 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.exceptions import NotFittedError
import pytest
def test_can_instantiate(tmp_path):
import brainiak.funcalign.srm
s = brainiak.funcalign.srm.SRM()
assert s, "Invalid SRM instance!"
import numpy as np
np.random.seed(0)
voxels = 100
samples = 500
subjects = 2
features = 3
s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)
assert s, "Invalid SRM instance!"
# Create a Shared response S with K = 3
theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
z = np.linspace(-2, 2, samples)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
S = np.vstack((x, y, z))
X = []
W = []
Q, R = np.linalg.qr(np.random.random((voxels, features)))
W.append(Q)
X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
# Check that transform does NOT run before fitting the model
with pytest.raises(NotFittedError):
s.transform(X)
print("Test: transforming before fitting the model")
# Check that it does NOT run with 1 subject
with pytest.raises(ValueError):
s.fit(X)
print("Test: running SRM with 1 subject")
for subject in range(1, subjects):
Q, R = np.linalg.qr(np.random.random((voxels, features)))
W.append(Q)
X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
# Check that runs with 2 subject
s.fit(X)
from pathlib import Path
sr_v0_4 = np.load(Path(__file__).parent / "sr_v0_4.npz")['sr']
assert(np.allclose(sr_v0_4, s.s_))
assert len(s.w_) == subjects, (
"Invalid computation of SRM! (wrong # subjects in W)")
for subject in range(subjects):
assert s.w_[subject].shape[0] == voxels, (
"Invalid computation of SRM! (wrong # voxels in W)")
assert s.w_[subject].shape[1] == features, (
"Invalid computation of SRM! (wrong # features in W)")
ortho = np.linalg.norm(s.w_[subject].T.dot(s.w_[subject])
- np.eye(s.w_[subject].shape[1]),
'fro')
assert ortho < 1e-7, "A Wi mapping is not orthonormal in SRM."
difference = np.linalg.norm(X[subject] - s.w_[subject].dot(s.s_),
'fro')
datanorm = np.linalg.norm(X[subject], 'fro')
assert difference/datanorm < 1.0, "Model seems incorrectly computed."
assert s.s_.shape[0] == features, (
"Invalid computation of SRM! (wrong # features in S)")
assert s.s_.shape[1] == samples, (
"Invalid computation of SRM! (wrong # samples in S)")
# Check that it does run to compute the shared response after the model
# computation
new_s = s.transform(X)
assert len(new_s) == subjects, (
"Invalid computation of SRM! (wrong # subjects after transform)")
for subject in range(subjects):
assert new_s[subject].shape[0] == features, (
"Invalid computation of SRM! (wrong # features after transform)")
assert new_s[subject].shape[1] == samples, (
"Invalid computation of SRM! (wrong # samples after transform)")
# Check that it does NOT run with non-matching number of subjects
with pytest.raises(ValueError):
s.transform(X[1])
print("Test: transforming with non-matching number of subjects")
# Check that it does not run without enough samples (TRs).
with pytest.raises(ValueError):
s.set_params(features=(samples+1))
s.fit(X)
print("Test: not enough samples")
# Check that it does not run with different number of samples (TRs)
S2 = S[:, :-2]
X.append(Q.dot(S2))
with pytest.raises(ValueError):
s.fit(X)
print("Test: different number of samples per subject")
# Check save/load functionality for fitted SRM
srm_path = tmp_path / 'srm.npz'
s.save(srm_path)
s_load = brainiak.funcalign.srm.load(srm_path)
assert np.array_equal(s.s_, s_load.s_)
for w, wl in zip(s.w_, s_load.w_):
assert np.array_equal(w, wl)
assert np.array_equal(s.sigma_s_, s_load.sigma_s_)
assert np.array_equal(s.mu_, s_load.mu_)
assert np.array_equal(s.rho2_, s_load.rho2_)
assert s.features == s_load.features
assert s.n_iter == s_load.n_iter
assert s.rand_seed == s_load.rand_seed
print("Test: save/load functionality")
def test_new_subject():
import brainiak.funcalign.srm
s = brainiak.funcalign.srm.SRM()
assert s, "Invalid SRM instance!"
import numpy as np
np.random.seed(0)
voxels = 100
samples = 500
subjects = 3
features = 3
s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)
assert s, "Invalid SRM instance!"
# Create a Shared response S with K = 3
theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
z = np.linspace(-2, 2, samples)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
S = np.vstack((x, y, z))
X = []
W = []
Q, R = np.linalg.qr(np.random.random((voxels, features)))
W.append(Q)
X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
for subject in range(1, subjects):
Q, R = np.linalg.qr(np.random.random((voxels, features)))
W.append(Q)
X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
# Check that transform does NOT run before fitting the model
with pytest.raises(NotFittedError):
s.transform_subject(X)
print("Test: transforming before fitting the model")
# Check that runs with 3 subject
s.fit(X)
# Check that you get an error when the data is the wrong shape
with pytest.raises(ValueError):
s.transform_subject(X[0].T)
# Check that it does run to compute a new subject
new_w = s.transform_subject(X[0])
assert new_w.shape[1] == features, (
"Invalid computation of SRM! (wrong # features for new subject)")
assert new_w.shape[0] == voxels, (
"Invalid computation of SRM! (wrong # voxels for new subject)")
# Check that these analyses work with the deterministic SRM too
ds = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)
# Check that transform does NOT run before fitting the model
with pytest.raises(NotFittedError):
ds.transform_subject(X)
print("Test: transforming before fitting the model")
# Check that runs with 3 subject
ds.fit(X)
# Check that you get an error when the data is the wrong shape
with pytest.raises(ValueError):
ds.transform_subject(X[0].T)
# Check that it does run to compute a new subject
new_w = ds.transform_subject(X[0])
assert new_w.shape[1] == features, (
"Invalid computation of SRM! (wrong # features for new subject)")
assert new_w.shape[0] == voxels, (
"Invalid computation of SRM! (wrong # voxels for new subject)")
def test_det_srm():
import brainiak.funcalign.srm
model = brainiak.funcalign.srm.DetSRM()
assert model, "Invalid DetSRM instance!"
import numpy as np
voxels = 100
samples = 500
subjects = 2
features = 3
model = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)
assert model, "Invalid DetSRM instance!"
# Create a Shared response S with K = 3
theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
z = np.linspace(-2, 2, samples)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
S = np.vstack((x, y, z))
X = []
W = []
Q, R = np.linalg.qr(np.random.random((voxels, features)))
W.append(Q)
X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
# Check that transform does NOT run before fitting the model
with pytest.raises(NotFittedError):
model.transform(X)
print("Test: transforming before fitting the model")
# Check that it does NOT run with 1 subject
with pytest.raises(ValueError):
model.fit(X)
print("Test: running DetSRM with 1 subject")
for subject in range(1, subjects):
Q, R = np.linalg.qr(np.random.random((voxels, features)))
W.append(Q)
X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
# Check that runs with 2 subject
model.fit(X)
assert len(model.w_) == subjects, (
"Invalid computation of DetSRM! (wrong # subjects in W)")
for subject in range(subjects):
assert model.w_[subject].shape[0] == voxels, (
"Invalid computation of DetSRM! (wrong # voxels in W)")
assert model.w_[subject].shape[1] == features, (
"Invalid computation of DetSRM! (wrong # features in W)")
ortho = np.linalg.norm(model.w_[subject].T.dot(model.w_[subject])
- np.eye(model.w_[subject].shape[1]),
'fro')
assert ortho < 1e-7, "A Wi mapping is not orthonormal in DetSRM."
difference = np.linalg.norm(X[subject]
- model.w_[subject].dot(model.s_),
'fro')
datanorm = np.linalg.norm(X[subject], 'fro')
assert difference/datanorm < 1.0, "Model seems incorrectly computed."
assert model.s_.shape[0] == features, (
"Invalid computation of DetSRM! (wrong # features in S)")
assert model.s_.shape[1] == samples, (
"Invalid computation of DetSRM! (wrong # samples in S)")
# Check that it does run to compute the shared response after the model
# computation
new_s = model.transform(X)
assert len(new_s) == subjects, (
"Invalid computation of DetSRM! (wrong # subjects after transform)")
for subject in range(subjects):
assert new_s[subject].shape[0] == features, (
"Invalid computation of DetSRM! (wrong # features after "
"transform)")
assert new_s[subject].shape[1] == samples, (
"Invalid computation of DetSRM! (wrong # samples after transform)")
# Check that it does run to compute a new subject
new_w = model.transform_subject(X[0])
assert new_w.shape[1] == features, (
"Invalid computation of SRM! (wrong # features for new subject)")
assert new_w.shape[0] == voxels, (
"Invalid computation of SRM! (wrong # voxels for new subject)")
# Check that it does NOT run with non-matching number of subjects
with pytest.raises(ValueError):
model.transform(X[1])
print("Test: transforming with non-matching number of subjects")
# Check that it does not run without enough samples (TRs).
with pytest.raises(ValueError):
model.set_params(features=(samples+1))
model.fit(X)
print("Test: not enough samples")
# Check that it does not run with different number of samples (TRs)
S2 = S[:, :-2]
X.append(Q.dot(S2))
with pytest.raises(ValueError):
model.fit(X)
print("Test: different number of samples per subject")
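# A minimal helper sketch (not part of the original test suite): the synthetic
# data construction repeated inside the tests above -- a 3-D spiral shared
# response S and per-subject orthonormal maps W_i with X_i = W_i.dot(S) + noise.
def _make_synthetic_srm_data(voxels=100, samples=500, subjects=2, noise=0.1):
    import numpy as np
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z ** 2 + 1
    S = np.vstack((r * np.sin(theta), r * np.cos(theta), z))  # shared response, K=3
    X, W = [], []
    for _ in range(subjects):
        Q, _ = np.linalg.qr(np.random.random((voxels, 3)))  # orthonormal W_i
        W.append(Q)
        X.append(Q.dot(S) + noise * np.random.random((voxels, samples)))
    return X, W, S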
| apache-2.0 |
codehacken/Kb4ML | lib/models/classify.py | 1 | 4185 | __author__ = 'ashwin'
__email__ = '[email protected]'
""""
Implement Standard classifiers.
"""
# Implementing the std. Naive Bayes Algorithm.
# Classification is based on Maximum-Likelihood for selecting the final class.
from sklearn.feature_extraction import DictVectorizer
from sklearn.naive_bayes import BernoulliNB
import numpy as np
from sklearn import metrics
class NaiveBayes:
def __init__(self):
self.feature_vector = DictVectorizer(sparse=False)
self.class_vector = {}
self.nb_model = BernoulliNB()
self.class_var = None
    # This function converts a set of class variable values to their integer
    # equivalents, which can then be used as class IDs.
def data2vector(self, class_data, reset=False):
if reset:
self.class_vector = {}
        # The final vector of integers for the class variable.
        transform_vector = []
        # The conversion is a simple mapping to the integers 0..x, where the
        # class variable takes x+1 distinct values.
idx = 0
for data_point in class_data:
if data_point[data_point.keys()[0]] not in self.class_vector:
self.class_vector[data_point[data_point.keys()[0]]] = idx
idx += 1
transform_vector.append(self.class_vector[data_point[data_point.keys()[0]]])
return np.array(transform_vector)
def vectorize_data(self, file_feature_data, file_class_result, if_train=True):
# Vectorize the training data.
if if_train:
transformed_feature_data = self.feature_vector.fit_transform(file_feature_data)
else:
transformed_feature_data = self.feature_vector.transform(file_feature_data)
# Vectorize the training data results (that is the class results applied to the same set)
transformed_class_data = self.data2vector(file_class_result)
return [transformed_feature_data, transformed_class_data]
def train_model(self, ft_data, cl_data):
return self.nb_model.fit(ft_data, cl_data)
def predict(self, ft_data):
return self.nb_model.predict(ft_data)
@staticmethod
def get_accuracy_score(train_cl_real, test_cl_predict):
return metrics.accuracy_score(train_cl_real, test_cl_predict)
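# Illustrative sketch only (hypothetical data, not part of the library): one
# way the BernoulliNB-backed wrapper above can be driven end to end.
def _example_naive_bayes_usage():
    train_features = [{'outlook': 'sunny'}, {'outlook': 'rain'},
                      {'outlook': 'overcast'}, {'outlook': 'sunny'}]
    train_classes = [{'play': 'no'}, {'play': 'yes'},
                     {'play': 'yes'}, {'play': 'no'}]
    nb = NaiveBayes()
    ft, cl = nb.vectorize_data(train_features, train_classes, if_train=True)
    nb.train_model(ft, cl)
    test_ft, test_cl = nb.vectorize_data(train_features, train_classes,
                                         if_train=False)
    predicted = nb.predict(test_ft)
    return NaiveBayes.get_accuracy_score(test_cl, predicted)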
"""
Sample Code:
Use a file reader to read the file and get the data; file_data is then
passed to the discrete Naive Bayes model below to train it.
naive_b = DiscreteNaiveBayes(file_reader.col_var, file_reader.class_var)
naive_b.train_model(file_data)
"""
class DiscreteNaiveBayes:
def __init__(self, var_names, class_var_name):
self.factor_freq = {}
self.result_freq = {}
self.class_var_name = class_var_name
self.__reset__(var_names)
# The __reset__ method is used to reset the trained model vector.
# This resets all the frequency counts to 0.
def __reset__(self, var_names):
# Setup the factor_freq mapping.
for var in var_names:
if var != self.class_var_name:
self.factor_freq[var] = {}
for var_category in var_names[var]:
self.factor_freq[var][var_category] = {}
for class_category in var_names[self.class_var_name]:
self.factor_freq[var][var_category][class_category] = 0
# Setup for the frequency mapping for the resultant categories.
for class_category in var_names[self.class_var_name]:
self.result_freq[class_category] = 0
    # Create the NB model based on the training data.
def train_model(self, training_data):
        # Keep a counter for each feature-value / class-value combination.
        # Probability is then estimated as n(X1) / n(S).
for data_point in training_data:
for data_point_val in data_point:
if data_point_val != self.class_var_name:
self.factor_freq[data_point_val][data_point[data_point_val]][data_point[self.class_var_name]] += 1
self.result_freq[data_point[self.class_var_name]] += 1
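    # A possible direction for the TBD noted below (a sketch, not the author's
    # implementation): maximum-likelihood classification from the counted
    # frequencies, picking argmax_c P(c) * prod_f P(feature_f = value | c).
    def classify_sketch(self, data_point):
        total = float(sum(self.result_freq.values()))
        best_class, best_score = None, -1.0
        for class_category, class_count in self.result_freq.items():
            score = class_count / total if total else 0.0
            for var, value in data_point.items():
                if var == self.class_var_name:
                    continue
                score *= (self.factor_freq[var][value][class_category]
                          / float(max(class_count, 1)))
            if score > best_score:
                best_class, best_score = class_category, score
        return best_class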
"""
TBD: Using the trained model to calculate prob. for the test data.
"""
| mit |
toobaz/pandas | pandas/tests/indexes/multi/test_reshape.py | 2 | 3470 | import numpy as np
import pytest
import pandas as pd
from pandas import Index, MultiIndex
import pandas.util.testing as tm
def test_insert(idx):
# key contained in all levels
new_index = idx.insert(0, ("bar", "two"))
assert new_index.equal_levels(idx)
assert new_index[0] == ("bar", "two")
# key not contained in all levels
new_index = idx.insert(0, ("abc", "three"))
exp0 = Index(list(idx.levels[0]) + ["abc"], name="first")
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(idx.levels[1]) + ["three"], name="second")
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ("abc", "three")
# key wrong length
msg = "Item must have length equal to number of levels"
with pytest.raises(ValueError, match=msg):
idx.insert(0, ("foo2",))
left = pd.DataFrame([["a", "b", 0], ["b", "d", 1]], columns=["1st", "2nd", "3rd"])
left.set_index(["1st", "2nd"], inplace=True)
ts = left["3rd"].copy(deep=True)
left.loc[("b", "x"), "3rd"] = 2
left.loc[("b", "a"), "3rd"] = -1
left.loc[("b", "b"), "3rd"] = 3
left.loc[("a", "x"), "3rd"] = 4
left.loc[("a", "w"), "3rd"] = 5
left.loc[("a", "a"), "3rd"] = 6
ts.loc[("b", "x")] = 2
ts.loc["b", "a"] = -1
ts.loc[("b", "b")] = 3
ts.loc["a", "x"] = 4
ts.loc[("a", "w")] = 5
ts.loc["a", "a"] = 6
right = pd.DataFrame(
[
["a", "b", 0],
["b", "d", 1],
["b", "x", 2],
["b", "a", -1],
["b", "b", 3],
["a", "x", 4],
["a", "w", 5],
["a", "a", 6],
],
columns=["1st", "2nd", "3rd"],
)
right.set_index(["1st", "2nd"], inplace=True)
    # FIXME: data types change to float because of intermediate NaN insertion
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right["3rd"])
# GH9250
idx = (
[("test1", i) for i in range(5)]
+ [("test2", i) for i in range(6)]
+ [("test", 17), ("test", 18)]
)
left = pd.Series(np.linspace(0, 10, 11), pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[("test", 17)] = 11
left.loc[("test", 18)] = 12
right = pd.Series(np.linspace(0, 12, 13), pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_append(idx):
result = idx[:3].append(idx[3:])
assert result.equals(idx)
foos = [idx[:1], idx[1:3], idx[3:]]
result = foos[0].append(foos[1:])
assert result.equals(idx)
# empty
result = idx.append([])
assert result.equals(idx)
def test_repeat():
reps = 2
numbers = [1, 2, 3]
names = np.array(["foo", "bar"])
m = MultiIndex.from_product([numbers, names], names=names)
expected = MultiIndex.from_product([numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
def test_insert_base(idx):
result = idx[1:4]
# test 0th element
assert idx[0:4].equals(result.insert(0, idx[0]))
def test_delete_base(idx):
expected = idx[1:]
result = idx.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = idx[:-1]
result = idx.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# Exception raised depends on NumPy version.
idx.delete(len(idx))
| bsd-3-clause |
r0k3/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
import numpy as np  # used by DataFrameModel.setData and testDf
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
    def deleteRow(self, index):
        ''' remove the row at the given position from the underlying DataFrame '''
        idx = self.df.index[index]
        self.beginRemoveRows(QModelIndex(), index, index)
        self.df = self.df.drop(idx, axis=0)
        self.endRemoveRows()
        self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
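# Minimal wiring sketch (comment only; assumes a running QApplication). The
# DataFrameWidget class further below wraps exactly this pattern:
#   model = DataFrameModel()
#   model.setDataFrame(DataFrame({'x': [1, 2, 3]}))
#   view = QTableView()
#   view.setModel(model)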
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
BhallaLab/moose-examples | snippets/reacDiffBranchingNeuron.py | 2 | 8284 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
def makeModel():
model = moose.Neutral( '/model' )
# Make neuronal model. It has no channels, just for geometry
cell = moose.loadModel( './branching.p', '/model/cell', 'Neutral' )
# We don't want the cell to do any calculations. Disable everything.
for i in moose.wildcardFind( '/model/cell/##' ):
i.tick = -1
# create container for model
model = moose.element( '/model' )
chem = moose.Neutral( '/model/chem' )
    # The naming of the compartments is dictated by the places where the
    # chem model expects to be loaded.
compt0 = moose.NeuroMesh( '/model/chem/compt0' )
compt0.separateSpines = 0
compt0.geometryPolicy = 'cylinder'
#reacSystem = moose.loadModel( 'simpleOsc.g', '/model/chem', 'ee' )
makeChemModel( compt0 ) # Populate all compt with the chem system.
compt0.diffLength = 1e-6 # This will be over 100 compartments.
# This is the magic command that configures the diffusion compartments.
compt0.subTreePath = cell.path + "/#"
moose.showfields( compt0 )
# Build the solvers. No need for diffusion in this version.
ksolve0 = moose.Ksolve( '/model/chem/compt0/ksolve' )
dsolve0 = moose.Dsolve( '/model/chem/compt0/dsolve' )
stoich0 = moose.Stoich( '/model/chem/compt0/stoich' )
# Configure solvers
stoich0.compartment = compt0
stoich0.ksolve = ksolve0
stoich0.dsolve = dsolve0
stoich0.path = '/model/chem/compt0/#'
assert( stoich0.numVarPools == 3 )
assert( stoich0.numProxyPools == 0 )
assert( stoich0.numRates == 4 )
num = compt0.numDiffCompts - 1
moose.element( '/model/chem/compt0/a[' + str(num) + ']' ).concInit *= 1.5
# Create the output tables
graphs = moose.Neutral( '/model/graphs' )
makeTab( 'a_soma', '/model/chem/compt0/a[0]' )
makeTab( 'b_soma', '/model/chem/compt0/b[0]' )
makeTab( 'a_apical', '/model/chem/compt0/a[' + str( num ) + ']' )
makeTab( 'b_apical', '/model/chem/compt0/b[' + str( num ) + ']' )
def makeTab( plotname, molpath ):
tab = moose.Table2( '/model/graphs/' + plotname ) # Make output table
# connect up the tables
moose.connect( tab, 'requestOut', moose.element( molpath ), 'getConc' );
def makeDisplay():
plt.ion()
fig = plt.figure( figsize=(10,12) )
layout = fig.add_subplot( 211 )
plt.ylabel( 'x position + 10*conc' )
plt.xlabel( 'y position (microns)' )
timeLabel = plt.text(0, 20, 'time = 0')
layout.set_xlim( -5, 75 )
layout.set_ylim( -20, 25 )
compt = moose.element( '/model/chem/compt0' )
pos = compt.voxelMidpoint
    i = len( pos ) // 3  # integer division: pos concatenates x, y and z coordinates
r2 = numpy.sqrt( 0.5 )
yp = [ -r2 * pos[j] * 1e6 for j in range( i ) ]
xp = pos[i:2*i] * 1e6 - yp
#xp = [ pos[i + j] for j in range( i ) ]
#yp = [ -r2 * pos[j] for j in range( i ) ]
#line0, = layout.plot( pos[:i], pos[i:2*i] , 'bo' )
line, = layout.plot( xp, yp, 'bo' )
timeSeries = fig.add_subplot( 212 )
timeSeries.set_ylim( 0, 0.6 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
fig.canvas.draw()
return ( timeSeries, fig, line, timeLabel, yp )
def updateDisplay( plotlist ):
a = moose.vec( '/model/chem/compt0/a' )
b = moose.vec( '/model/chem/compt0/b' )
plotlist[2].set_ydata( a.conc * 10 + plotlist[4] )
plotlist[1].canvas.draw()
def finalizeDisplay( plotlist, cPlotDt ):
for x in moose.wildcardFind( '/model/graphs/#[ISA=Table2]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = plotlist[0].plot( pos, x.vector, label=x.name )
plt.legend()
plotlist[1].canvas.draw()
print( "Hit any key to exit" )
try:
a = raw_input( )
except NameError as e:
a = input( )
def makeChemModel( compt ):
"""
This function sets up a simple oscillatory chemical system within
the script. The reaction system is::
s ---a---> a // s goes to a, catalyzed by a.
s ---a---> b // s goes to b, catalyzed by a.
a ---b---> s // a goes to s, catalyzed by b.
b -------> s // b is degraded irreversibly to s.
in sum, **a** has a positive feedback onto itself and also forms **b**.
**b** has a negative feedback onto **a**.
Finally, the diffusion constant for **a** is 1/10 that of **b**.
"""
# create container for model
diffConst = 10e-12 # m^2/sec
motorRate = 1e-6 # m/sec
concA = 1 # millimolar
# create molecules and reactions
a = moose.Pool( compt.path + '/a' )
b = moose.Pool( compt.path + '/b' )
s = moose.Pool( compt.path + '/s' )
e1 = moose.MMenz( compt.path + '/e1' )
e2 = moose.MMenz( compt.path + '/e2' )
e3 = moose.MMenz( compt.path + '/e3' )
r1 = moose.Reac( compt.path + '/r1' )
a.concInit = 0.1
b.concInit = 0.1
s.concInit = 1
moose.connect( e1, 'sub', s, 'reac' )
moose.connect( e1, 'prd', a, 'reac' )
moose.connect( a, 'nOut', e1, 'enzDest' )
e1.Km = 1
e1.kcat = 1
moose.connect( e2, 'sub', s, 'reac' )
moose.connect( e2, 'prd', b, 'reac' )
moose.connect( a, 'nOut', e2, 'enzDest' )
e2.Km = 1
e2.kcat = 0.5
moose.connect( e3, 'sub', a, 'reac' )
moose.connect( e3, 'prd', s, 'reac' )
moose.connect( b, 'nOut', e3, 'enzDest' )
e3.Km = 0.1
e3.kcat = 1
moose.connect( r1, 'sub', b, 'reac' )
moose.connect( r1, 'prd', s, 'reac' )
r1.Kf = 0.3 # 1/sec
r1.Kb = 0 # 1/sec
# Assign parameters
a.diffConst = diffConst/10
b.diffConst = diffConst
s.diffConst = 0
def main():
"""
This example illustrates how to define a kinetic model embedded in
the branching pseudo 1-dimensional geometry of a neuron. This means
that diffusion only happens along the axis of dendritic segments, not
radially from inside to outside a dendrite, nor tangentially around
the dendrite circumference. The model
oscillates in space and time due to a Turing-like reaction-diffusion
mechanism present in all compartments. For the sake of this demo,
the initial conditions are set to be slightly different on one of the
terminal dendrites, so as to break the symmetry and initiate
oscillations.
This example uses an external model file to specify a binary branching
neuron. This model does not have any spines. The electrical model is
used here purely for the geometry and is not part of the computations.
In this example we build an identical chemical model throughout the
neuronal geometry, using the makeChemModel function.
The model is set up to run using the Ksolve for integration and the
Dsolve for handling diffusion.
The display has two parts:
a. Animated pseudo-3D plot of neuronal geometry, where each point
represents a diffusive voxel and moves in the y-axis to show
changes in concentration.
b. Time-series plot that appears after the simulation has
ended. The plots are for the first and last diffusive voxel,
that is, the soma and the tip of one of the apical dendrites.
"""
chemdt = 0.1 # Tested various dts, this is reasonable.
diffdt = 0.01
plotdt = 1
animationdt = 5
runtime = 750
makeModel()
plotlist = makeDisplay()
# Schedule the whole lot. Autoscheduling already takes care of these
'''
for i in range( 11, 17 ):
moose.setClock( i, chemdt ) # for the chem objects
moose.setClock( 10, diffdt ) # for the diffusion
moose.setClock( 18, plotdt ) # for the output tables.
'''
moose.reinit()
for i in range( 0, runtime, animationdt ):
moose.start( animationdt )
plotlist[3].set_text( "time = %d" % i )
updateDisplay( plotlist )
finalizeDisplay( plotlist, plotdt )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-2.0 |
alexsavio/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 58 | 3692 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
mne-tools/mne-python | mne/viz/tests/test_montage.py | 14 | 2773 | # Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Teon Brooks <[email protected]>
#
# License: Simplified BSD
# Set our plotters to test mode
import os.path as op
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne.channels import (read_dig_fif, make_dig_montage,
make_standard_montage)
p_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit', 'tests', 'data')
elp = op.join(p_dir, 'test_elp.txt')
hsp = op.join(p_dir, 'test_hsp.txt')
hpi = op.join(p_dir, 'test_mrk.sqd')
point_names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
io_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
fif_fname = op.join(io_dir, 'test_raw.fif')
def test_plot_montage():
"""Test plotting montages."""
m = make_standard_montage('easycap-M1')
m.plot()
plt.close('all')
m.plot(kind='3d')
plt.close('all')
m.plot(kind='3d', show_names=True)
plt.close('all')
m.plot(kind='topomap')
plt.close('all')
m.plot(kind='topomap', show_names=True)
plt.close('all')
N_HSP, N_HPI = 2, 1
montage = make_dig_montage(nasion=[1, 1, 1], lpa=[2, 2, 2], rpa=[3, 3, 3],
hsp=np.full((N_HSP, 3), 4),
hpi=np.full((N_HPI, 3), 4),
coord_frame='head')
assert '0 channels' in repr(montage)
with pytest.raises(RuntimeError, match='No valid channel positions'):
montage.plot()
d = read_dig_fif(fname=fif_fname)
assert '61 channels' in repr(d)
# XXX this is broken; dm.point_names is used. Sometimes we say this should
# Just contain the HPI coils, other times that it's all channels (e.g.,
# EEG channels). But there is redundancy with this and dm.dig_ch_pos.
# This should be addressed in the pending big refactoring.
# d.plot()
# plt.close('all')
@pytest.mark.parametrize('name, n', [
('standard_1005', 342), ('standard_postfixed', 85),
('standard_primed', 85), ('standard_1020', 93)
])
def test_plot_defect_montage(name, n):
"""Test plotting defect montages (i.e. with duplicate labels)."""
# montage name and number of unique labels
m = make_standard_montage(name)
n -= 3 # new montage does not have fiducials
fig = m.plot()
collection = fig.axes[0].collections[0]
assert collection._edgecolors.shape[0] == n
assert collection._facecolors.shape[0] == n
assert collection._offsets.shape[0] == n
def test_plot_digmontage():
"""Test plot DigMontage."""
montage = make_dig_montage(
ch_pos=dict(zip(list('abc'), np.eye(3))),
coord_frame='head'
)
montage.plot()
plt.close('all')
| bsd-3-clause |
carrillo/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
cgre-aachen/gempy | gempy/bayesian/theano_op.py | 1 | 5469 | import theano
import theano.tensor as tt
import gempy as gp
import copy
class GemPyThOp:
def __init__(self, model: gp.Project):
model = copy.deepcopy(model)
gp.set_interpolator(model, compile_theano=False,
output=['geology', 'gravity', 'magnetics'],
gradient=True)
self.model = model
self.th_op = None
def get_output(self, output):
if output == 'gravity':
out = self.model._interpolator.theano_graph.theano_output()[12][0]
elif output == 'lith':
out = self.model._interpolator.theano_graph.theano_output()[0][1]
else:
raise AttributeError()
return out
def get_wrt(self, wrt: str):
if wrt == 'surface_points':
wrt_ = self.model._interpolator.theano_graph.input_parameters_loop[4]
else:
raise AttributeError
return wrt_
def set_th_op(self, output):
interpolator = self.model._interpolator
out = self.get_output(output)
i = interpolator.get_python_input_block()
theano.config.compute_test_value = 'ignore'
self.th_op = theano.OpFromGraph(interpolator.theano_graph.input_parameters_loop,
[out],
inline=False,
on_unused_input='ignore',
name=output)
return self.th_op
def test_gradient(self, output: str, wrt: str):
theano.config.compute_test_value = 'ignore'
interpolator = self.model._interpolator
out = self.get_output(output)
wrt_ = self.get_wrt(wrt)
geo_model_T = theano.OpFromGraph(interpolator.theano_graph.input_parameters_loop,
[theano.grad(out[0], wrt_)],
inline=True,
on_unused_input='ignore',
name='test_'+output)
i = interpolator.get_python_input_block()
th_f = theano.function([], geo_model_T(*i), on_unused_input='warn')
interpolator.theano_graph.sig_slope.set_value(20)
return th_f()
@staticmethod
def set_shared(python_input):
input_sh = []
i = python_input
for ii in i:
input_sh.append(theano.shared(ii))
return input_sh
def gempy_th_op(geo_model):
theano.config.compute_test_value = 'ignore'
geo_model_T = theano.OpFromGraph(geo_model.interpolator.theano_graph.input_parameters_loop,
[theano.grad(geo_model.interpolator.theano_graph.theano_output()[12],
geo_model.interpolator.theano_graph.input_parameters_loop[4])],
inline=True,
on_unused_input='ignore',
name='forw_grav')
# %%
i = geo_model.interpolator.get_python_input_block()
th_f = theano.function([], geo_model_T(*i), on_unused_input='warn')
# %%
geo_model.interpolator.theano_graph.sig_slope.set_value(20)
# %%
th_f()
# %%
# Setup Bayesian model
# --------------------
#
# %%
i = geo_model.interpolator.get_python_input_block()
theano.config.compute_test_value = 'ignore'
geo_model_T_grav = theano.OpFromGraph(geo_model.interpolator.theano_graph.input_parameters_loop,
[geo_model.interpolator.theano_graph.theano_output()[12]],
inline=False,
on_unused_input='ignore',
name='forw_grav')
# %%
geo_model_T_thick = theano.OpFromGraph(geo_model.interpolator.theano_graph.input_parameters_loop,
[geo_model.interpolator.theano_graph.compute_series()[0][1][0:250000]], inline=True,
on_unused_input='ignore',
name='geo_model')
# %%
# We convert a python variable to theano.shared
input_sh = []
i = geo_model.interpolator.get_python_input_block()
for ii in i:
input_sh.append(theano.shared(ii))
# We get the rescaling parameters:
rf = geo_model.rescaling.df.loc['values', 'rescaling factor'].astype('float32')
centers = geo_model.rescaling.df.loc['values', 'centers'].astype('float32')
# We create pandas groups by id to be able to modify several points at the same time:
g = geo_model.surface_points.df.groupby('id')
l = theano.shared(np.array([], dtype='float64'))
# %%
g_obs_p = 1e3 * np.array([-0.3548658 , -0.35558686, -0.3563156 , -0.35558686, -0.3548658 ,
-0.3534237 , -0.35201198, -0.3534237 , -0.3548658 , -0.3563401 ,
-0.3548658 , -0.35558686, -0.3548658 , -0.3541554 , -0.3534569 ,
-0.3527707 , -0.35424498, -0.35575098, -0.3572901 , -0.35575098,
-0.35424498, -0.35575098, -0.35424498, -0.35575098, -0.35424498,
-0.35575098, -0.35643718, -0.35713565, -0.35643718], dtype='float32')
y_obs_list = 1e3 * np.array([2.12, 2.06, 2.08, 2.05, 2.08, 2.09,
2.19, 2.07, 2.16, 2.11, 2.13, 1.92])
# %%
# Python input variables
i = geo_model.interpolator.get_python_input_block()
| lgpl-3.0 |
nelson-liu/scikit-learn | examples/applications/plot_out_of_core_classification.py | 51 | 13651 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
jfsehuanes/thunderfish | thunderfish/tracker.py | 1 | 46236 | """
Track wave-type electric fish frequencies over time.
fish_tracker(): load data and track fish.
"""
import sys
import os
import argparse
import numpy as np
from .version import __version__
from .configfile import ConfigFile
from .dataloader import open_data
from .powerspectrum import spectrogram, next_power_of_two
from .harmonicgroups import add_psd_peak_detection_config, add_harmonic_groups_config
from .harmonicgroups import harmonic_groups_args, psd_peak_detection_args
from .harmonicgroups import harmonic_groups, fundamental_freqs, plot_psd_harmonic_groups
try:
import matplotlib.pyplot as plt
except ImportError:
pass
# TODO: update to numpy doc style!
def extract_fundamentals(data, samplerate, start_time=0.0, end_time=-1.0,
data_snippet_secs=60.0,
nffts_per_psd=4, fresolution=0.5, overlap_frac=.9,
plot_harmonic_groups=False, verbose=0, **kwargs):
"""
    For a long data array calculates spectrograms of small data snippets, computes PSDs, extracts harmonic groups and
    extracts fundamental frequencies.
:param data: (array) raw data.
:param samplerate: (int) samplerate of data.
:param start_time: (int) analyze data from this time on (in seconds). XXX this should be a float!!!! Internally I would use indices.
:param end_time: (int) stop analysis at this time (in seconds). If -1 then analyse to the end of the data. XXX TODO this should be a float!!!! Internally I would use indices.
    :param data_snippet_secs: (float) duration of the data snippet processed at once in seconds. Necessary because of memory issues.
:param nffts_per_psd: (int) number of nffts used for calculating one psd.
:param fresolution: (float) frequency resolution for the spectrogram.
:param overlap_frac: (float) overlap of the nffts (0 = no overlap; 1 = total overlap).
:param verbose: (int) with increasing value provides more output on console.
:param kwargs: further arguments are passed on to harmonic_groups().
    :return all_fundamentals: (list) containing arrays with the fundamental frequencies of fishes detected at a certain time.
:return all_times: (array) containing time stamps of frequency detection. ( len(all_times) == len(fishes[xy]) )
"""
all_fundamentals = []
all_times = np.array([])
if end_time < 0.0:
end_time = len(data)/samplerate
nfft = next_power_of_two(samplerate / fresolution)
if len(data.shape) > 1:
channels = range(data.shape[1])
else:
channels = range(1)
while start_time < int((len(data)- data_snippet_secs*samplerate) / samplerate) or int(start_time) == 0:
if verbose >= 3:
print('Minute %.2f' % (start_time/60))
for channel in channels:
# print(channel)
if len(channels) > 1:
tmp_data = data[int(start_time*samplerate) : int((start_time+data_snippet_secs)*samplerate), channel]
else:
tmp_data = data[int(start_time*samplerate) : int((start_time+data_snippet_secs)*samplerate)]
# spectrogram
spectrum, freqs, time = spectrogram(tmp_data, samplerate, fresolution=fresolution, overlap_frac=overlap_frac) # nfft window = 2 sec
# psd and fish fundamentals frequency detection
tmp_power = [np.array([]) for i in range(len(time)-(nffts_per_psd-1))]
for t in range(len(time)-(nffts_per_psd-1)):
# power = np.mean(spectrum[:, t:t+nffts_per_psd], axis=1)
tmp_power[t] = np.mean(spectrum[:, t:t+nffts_per_psd], axis=1)
if channel == 0:
power = tmp_power
else:
for t in range(len(power)):
power[t] += tmp_power[t]
all_times = np.concatenate((all_times, time[:-(nffts_per_psd-1)] + start_time))
for p in range(len(power)):
fishlist, _, mains, all_freqs, good_freqs, _, _, _ = harmonic_groups(freqs, power[p], **kwargs)
fundamentals = fundamental_freqs(fishlist)
all_fundamentals.append(fundamentals)
if plot_harmonic_groups:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plot_psd_harmonic_groups(ax, freqs, power[p], fishlist, mains,
all_freqs, good_freqs, max_freq=3000.0)
ax.set_title('time = %gmin' % ((start_time+0.0)/60.0)) # XXX TODO plus what???
plt.show()
if (len(all_times) % ((len(time) - (nffts_per_psd-1)) * 30)) > -1 and (
len(all_times) % ((len(time) - (nffts_per_psd-1)) * 30)) < 1:
if verbose >= 3:
print('Minute %.0f' % (start_time/60))
start_time += time[-nffts_per_psd] - (0.5 -(1-overlap_frac)) * nfft / samplerate
if end_time > 0:
if start_time >= end_time:
if verbose >= 3:
print('End time reached!')
break
return all_fundamentals, all_times
def first_level_fish_sorting(all_fundamentals, base_name, all_times, prim_time_tolerance=5., freq_tolerance = .5,
save_original_fishes=False, output_folder = '.', verbose=0):
"""
Sorts fundamental frequencies of wave-type electric fish detected at certain timestamps to fishes.
    There is an array of fundamental frequencies for every timestamp (all_fundamentals). Each of these frequencies is
    compared to the last frequency of the already detected fishes (last_fish_fundamentals). If the frequency difference
    between the new frequency and one or multiple already detected fishes is below the tolerance, the frequency is
    appended to the array containing all frequencies of the fish (fishes) that has been absent for the shortest period
    of time. If the frequency doesn't fit to any fish, a new fish array is created. If a fish has not been detected at
    one time-step a NaN is added to this fish array.
    The result is for each fish an array containing frequencies or NaNs with the same length as the time array
    (all_times). These fish arrays can be saved as .npy files so that later analysis steps can be rerun without
    repeating this time-demanding step.
    :param all_fundamentals: (list) containing arrays with the fundamental frequencies of fishes detected at a certain time.
:param base_name: (string) filename.
:param all_times: (array) containing time stamps of frequency detection. ( len(all_times) == len(fishes[xy]) )
    :param prim_time_tolerance: (float) time in minutes after which an undetected fish is no longer tracked.
:param freq_tolerance: (float) maximum frequency difference to assign a frequency to a certain fish.
:param save_original_fishes: (boolean) if True saves the sorted fishes after the first level of fish sorting.
:param verbose: (int) with increasing value provides more shell output.
:return fishes: (list) containing arrays of sorted fish frequencies. Each array represents one fish.
"""
def clean_up(fishes, last_fish_fundamentals, end_nans):
"""
        Delete fish arrays with too few data points to reduce memory usage.
:param fishes: (list) containing arrays of sorted fish frequencies. Each array represents one fish.
:param last_fish_fundamentals: (list) contains for every fish in fishes the last detected fundamental frequency.
:param end_nans: (list) for every fish contains the counts of nans since the last fundamental detection.
:return: fishes: (list) cleaned up input list.
:return: last_fish_fundamentals: (list) cleaned up input list.
:return: end_nans: (list) cleaned up input list.
"""
for fish in reversed(range(len(fishes))):
if len(np.array(fishes[fish])[~np.isnan(fishes[fish])]) <= 10:
fishes.pop(fish)
last_fish_fundamentals.pop(fish)
end_nans.pop(fish)
return fishes, last_fish_fundamentals, end_nans
detection_time_diff = all_times[1] - all_times[0]
dpm = 60. / detection_time_diff # detections per minutes
fishes = [np.full(len(all_fundamentals)+1, np.nan)]
fishes[0][0] = 0.
last_fish_fundamentals = [ 0. ]
end_nans = [0]
# for every list of fundamentals ...
clean_up_idx = int(30 * dpm)
for enu, fundamentals in enumerate(all_fundamentals):
if enu == clean_up_idx:
if verbose >= 3:
print('cleaning up ...')
fishes, last_fish_fundamentals, end_nans = clean_up(fishes, last_fish_fundamentals, end_nans)
clean_up_idx += int(30 * dpm)
for idx in range(len(fundamentals)):
diff = np.abs(np.asarray(last_fish_fundamentals) - fundamentals[idx])
sorted_diff_idx = np.argsort(diff)
tolerated_diff_idx = sorted_diff_idx[diff[sorted_diff_idx] < freq_tolerance]
last_detect_of_tolerated = np.array(end_nans)[tolerated_diff_idx]
if len(tolerated_diff_idx) == 0:
fishes.append(np.full(len(all_fundamentals)+1, np.nan))
fishes[-1][enu+1] = fundamentals[idx]
last_fish_fundamentals.append(fundamentals[idx])
end_nans.append(0)
else:
found = False
for i in tolerated_diff_idx[np.argsort(last_detect_of_tolerated)]:
if np.isnan(fishes[i][enu+1]):
fishes[i][enu+1] = fundamentals[idx]
last_fish_fundamentals[i] = fundamentals[idx]
end_nans[i] = 0
found = True
break
if not found:
fishes.append(np.full(len(all_fundamentals)+1, np.nan))
fishes[-1][enu+1] = fundamentals[idx]
last_fish_fundamentals.append(fundamentals[idx])
end_nans.append(0)
for fish in range(len(fishes)):
if end_nans[fish] >= prim_time_tolerance * dpm:
last_fish_fundamentals[fish] = 0.
if np.isnan(fishes[fish][enu+1]):
end_nans[fish] += 1
if verbose >= 3:
print('cleaning up ...')
fishes, last_fish_fundamentals, end_nans = clean_up(fishes, last_fish_fundamentals, end_nans)
# reshape everything to arrays
for fish in range(len(fishes)):
fishes[fish] = fishes[fish][1:]
    # if not removed by clean_up(): remove the first fish because it has only been used for the first comparison!
if fishes[0][0] == 0.:
fishes.pop(0)
if save_original_fishes:
print('saving')
np.save(os.path.join(output_folder, base_name) + '-fishes.npy', np.asarray(fishes))
np.save(os.path.join(output_folder, base_name) + '-times.npy', all_times)
return np.asarray(fishes)
def detect_rises(fishes, all_times, rise_f_th = .5, verbose = 0):
"""
Detects rises in frequency arrays that belong to a certain fish.
    Single rises are detected with the function 'detect_single_rise()' and get appended to a list.
    When the function 'detect_single_rise()' detects a rise it returns some data about the rise, and the search for
    further rises continues at that index in the data where the detected rise ended. (While-loop)
:param fishes: (array) containing arrays of sorted fish frequencies. Each array represents one fish.
:param all_times: (array) containing time stamps of frequency detection. ( len(all_times) == len(fishes[xy]) )
:param rise_f_th: (float) minimum frequency difference between peak and base of a rise to be detected as such.
    :return all_rises: (list) contains a list for each fish, each of which contains a list for every detected rise. This
    innermost list holds two arrays containing the index and the frequency of start and end of the rise.
    all_rises[ fish ][ rise ] = [ [idx_start, idx_end], [freq_start, freq_end] ]
"""
def detect_single_rise(fish, non_nan_idx, rise_f_th, dpm):
"""
Detects a single rise in an array of fish frequencies.
        At first an index of the array is detected from which on every frequency within the next 10 seconds is lower.
        This index is at first assumed as the peak of the rise.
        Afterwards an index is searched for from which on every frequency within the next 30 seconds is larger.
        This index is assumed as the end of the rise.
        The other possibility to get an end index of a rise is when the frequency doesn't drop any longer.
        If during the process of finding the end and the peak of the rise, the time difference between those indices
        rises above a time threshold (10 min) or the frequency rises above the assumed peak frequency of the rise, both
        indices are withdrawn and the search continues.
        When both a peak and an end index are detected the frequency difference between those indices has to be larger
        than n * frequency threshold. n is defined by the time difference between peak and end of the rise.
        In the end, index and frequency of rise peak and end are part of the return, as well as the non-NaN indices of
        the fish array that are larger than the end index of the detected rise.
        :param fish: (array) sorted fish frequencies.
        :param non_nan_idx: (array) indices where the fish array is not NaN.
        :param rise_f_th: (float) minimum frequency difference between peak and base of a rise to be detected as such.
        :param dpm: (float) detections per minute in the fish array.
:return: index and frequency of start and end of one detected rise.
[[start_idx, end_idx], [start_freq, end_freq]]
:return: Indices where the fish array is not Nan only containing those values larger than the end_idx of the
detected rise.
"""
loop_idxs = np.arange(len(non_nan_idx[non_nan_idx <= non_nan_idx[-1] - dpm/ 60. * 10]))
for i in loop_idxs:
help_idx = np.arange(len(non_nan_idx))[non_nan_idx < non_nan_idx[i] + dpm / 60. * 10][-1]
idxs = non_nan_idx[i+1:help_idx]
if len(idxs) < dpm / 60. * 1.:
continue
if len(fish[idxs][fish[idxs] < fish[non_nan_idx[i]]]) == len(fish[idxs]):
for j in loop_idxs[loop_idxs > i]:
if fish[non_nan_idx[j]] >= fish[non_nan_idx[i]]:
break
if non_nan_idx[j] - non_nan_idx[i] >= dpm * 10.:
break
help_idx2 = np.arange(len(non_nan_idx))[non_nan_idx < non_nan_idx[j] + dpm / 60. * 30][-1]
idxs2 = non_nan_idx[j+1:help_idx2]
last_possibe = False
if fish[non_nan_idx[j]] - np.median(fish[idxs2]) < 0.05:
last_possibe = True
if len(fish[idxs2][fish[idxs2] >= fish[non_nan_idx[j]]]) == len(fish[idxs2]) or non_nan_idx[j] == non_nan_idx[-1] or last_possibe:
freq_th = rise_f_th + ((non_nan_idx[j] - non_nan_idx[i]) *1.) // (dpm /60. *30) * rise_f_th
if fish[non_nan_idx[i]] - fish[non_nan_idx[j]] >= freq_th:
nnans_befor_start = non_nan_idx[(non_nan_idx > non_nan_idx[i] - dpm / 60 *10) & (non_nan_idx <= non_nan_idx[i])]
diff_nnans_before = np.append([nnans_befor_start[0] - (non_nan_idx[i] - dpm / 60 * 10)],np.diff(nnans_befor_start))
if len(diff_nnans_before[diff_nnans_before >= dpm / 60 * 3]) > 0:
new_start_idx = nnans_befor_start[diff_nnans_before >= dpm / 60 * 3][-1]
return [[new_start_idx, non_nan_idx[j]], [fish[new_start_idx], fish[non_nan_idx[j]]]] , non_nan_idx[j+1:]
return [[non_nan_idx[i], non_nan_idx[j]], [fish[non_nan_idx[i]], fish[non_nan_idx[j]]]], non_nan_idx[j+1:]
else:
break
return [[], []], [non_nan_idx[-1]]
detection_time_diff = all_times[1] - all_times[0]
dpm = 60. / detection_time_diff
all_rises = []
progress = '0.00'
if verbose >= 3:
print('Progress:')
for enu, fish in enumerate(fishes):
if verbose >= 3:
if ('%.2f' % (enu * 1.0 / len(fishes))) != progress:
print('%.2f' % (enu * 1.0 / len(fishes)))
progress = ('%.2f' % (enu * 1.0 / len(fishes)))
non_nan_idx = np.arange(len(fish))[~np.isnan(fish)]
fish_rises = []
while non_nan_idx[-1] - non_nan_idx[0] > (dpm / 60. * 10) + 1:
rise_data, non_nan_idx = detect_single_rise(fish, non_nan_idx, rise_f_th, dpm)
fish_rises.append(rise_data)
if not fish_rises == []:
if fish_rises[-1][0] == []:
fish_rises.pop(-1)
all_rises.append(fish_rises)
return all_rises
def combine_fishes(fishes, all_times, all_rises, max_time_tolerance = 10., f_th = 5.):
"""
    Combines arrays of electric fish fundamental frequencies which, based on frequency difference and time of
    occurrence, likely belong to the same fish.
    Every fish is compared to the fishes that appeared before this fish. If the times of occurrence of two fishes
    overlap or differ by less than a certain time tolerance (10 min.), a compare index is determined for each of these
    fishes. For the fish that occurred second this compare index is either the index of the end of a rise (when the
    fish array begins with a rise) or the first index of frequency detection (when the fish array doesn't begin with a
    rise). For the fish that occurred first the compare index is the last index of detection before the compare index
    of the second fish.
    If the frequencies of the two fishes at the compare indices differ by less than the frequency threshold and the
    count of simultaneous detections is below threshold, a 'distance value' is calculated
    (frequency difference + alpha * time difference of the compare indices). These 'distance values' are saved in a
    matrix.
    In the end this matrix contains the 'distance values' of every fish to all other fishes, or NaNs if the fishes are
    not combinable. Every fish and its 'distance values' to all other fishes is represented by a row in the matrix.
    (possible_combination_all_fish[fish][compare_fish] = 'distance value' between fish and compare_fish).
    In the next step the fish arrays get combined. Therefore the minimum 'distance value' in the whole matrix is
    located. The indices of this value identify the two fishes that fit together best. The values of the second fish
    (fish) get transferred into the array of the first fish (comp_fish). Furthermore, in the 'distance value' matrix
    the values that pointed to the second fish now point to the first fish. Since the second fish can no longer point
    to another fish, its row in the 'distance value' matrix gets replaced by an array full of NaNs.
    This process is repeated until the 'distance value' matrix only consists of NaNs.
    When a fish is combined with another one its rise data also gets transferred.
    In the end the list of fish frequency arrays gets cleaned up as well as the rise list. (As a result of the sorting
    process the fishes array contains arrays only consisting of NaNs. These get deleted.)
:param fishes: (array) containing arrays of sorted fish frequencies. Each array represents one fish.
:param all_times: (array) containing time stamps of frequency detection. ( len(all_times) == len(fishes[xy]) )
    :param all_rises: (list) contains a list for each fish, each of which contains a list for every detected rise. This
    innermost list holds two arrays containing the index and the frequency of start and end of the rise.
    all_rises[ fish ][ rise ] = [ [idx_start, idx_end], [freq_start, freq_end] ]
:param max_time_tolerance: (float) maximum time difference in min. between two fishes to allow combination.
:param f_th: (float) maximum frequency difference between two fishes to allow combination
:return fishes: (array) containing arrays of sorted fish frequencies. Each array represents one fish.
    :return all_rises: (list) contains a list for each fish, each of which contains a list for every detected rise. This
    innermost list holds two arrays containing the index and the frequency of start and end of the rise.
    all_rises[ fish ][ rise ] = [ [idx_start, idx_end], [freq_start, freq_end] ]
"""
detection_time_diff = all_times[1] - all_times[0]
dpm = 60. / detection_time_diff # detections per minutes
occure_idx = []
delete_idx = []
possible_combinations_all_fish = np.array([np.full(len(fishes), np.nan) for i in range(len(fishes))])
for fish in range(len(fishes)):
non_nan_idx = np.arange(len(fishes[fish]))[~np.isnan(fishes[fish])]
first_and_last_idx = np.array([non_nan_idx[0], non_nan_idx[-1]])
occure_idx.append(first_and_last_idx)
occure_order = np.argsort(np.array([occure_idx[i][0] for i in range(len(fishes))]))
for fish in reversed(occure_order):
possible_freq_combinations = np.full(len(fishes), np.nan)
possible_idx_combinations = np.full(len(fishes), np.nan)
possible_combinations = np.full(len(fishes), np.nan)
for comp_fish in reversed(occure_order[:np.where(occure_order == fish)[0][0]]):
combinable = False
if occure_idx[fish][0] > occure_idx[comp_fish][0] and occure_idx[fish][0] < occure_idx[comp_fish][1]:
combinable = True
comp_fish_nnans_idxs = np.arange(len(fishes[comp_fish]))[~np.isnan(fishes[comp_fish])]
if all_rises[fish] != []:
if occure_idx[fish][0] in [all_rises[fish][i][0][0] for i in range(len(all_rises[fish]))]:
x = np.where( np.array([all_rises[fish][i][0][0] for i in range(len(all_rises[fish]))]) == occure_idx[fish][0])[0][0]
compare_idxs = [all_rises[fish][x][0][1], comp_fish_nnans_idxs[comp_fish_nnans_idxs < all_rises[fish][x][0][1]][-1]]
else:
compare_idxs = [occure_idx[fish][0], comp_fish_nnans_idxs[comp_fish_nnans_idxs < occure_idx[fish][0]][-1]]
else:
compare_idxs = [occure_idx[fish][0], comp_fish_nnans_idxs[comp_fish_nnans_idxs < occure_idx[fish][0]][-1]]
elif occure_idx[fish][0] > occure_idx[comp_fish][1] and occure_idx[fish][0] - occure_idx[comp_fish][1] < max_time_tolerance * dpm:
combinable = True
if all_rises[fish] != []:
if occure_idx[fish][0] in [all_rises[fish][i][0][0] for i in range(len(all_rises[fish]))]:
x = np.where( np.array([all_rises[fish][i][0][0] for i in range(len(all_rises[fish]))]) == occure_idx[fish][0])[0][0]
compare_idxs = [all_rises[fish][x][0][1], occure_idx[comp_fish][1]]
else:
compare_idxs = [occure_idx[fish][0], occure_idx[comp_fish][1]]
else:
compare_idxs = [occure_idx[fish][0], occure_idx[comp_fish][1]]
if combinable:
alpha = 0.01 # alpha cant be larger ... to many mistakes !!!
if np.abs(fishes[fish][compare_idxs[0]] - fishes[comp_fish][compare_idxs[1]]) <= f_th:
nan_test = fishes[fish] + fishes[comp_fish]
if len(nan_test[~np.isnan(nan_test)]) <= 20:
possible_freq_combinations[comp_fish] = np.abs(
[fishes[fish][compare_idxs[0]] - fishes[comp_fish][compare_idxs[1]]])
possible_idx_combinations[comp_fish] = np.abs([compare_idxs[0] - compare_idxs[1]])
possible_combinations[comp_fish] = possible_freq_combinations[comp_fish] + possible_idx_combinations[comp_fish] / (dpm / 60.) * alpha
# ax.plot([compare_idxs[0], compare_idxs[1]], [fishes[fish][compare_idxs[0]], fishes[comp_fish][compare_idxs[1]]], '-', color = 'red')
if comp_fish == 0 and len(possible_freq_combinations[~np.isnan(possible_freq_combinations)]) > 0:
possible_combinations_all_fish[fish] = possible_combinations
combining_finished = False
while combining_finished == False:
if np.size(possible_combinations_all_fish[~np.isnan(possible_combinations_all_fish)]) == 0:
combining_finished = True
continue
fish = np.where(possible_combinations_all_fish == np.min(possible_combinations_all_fish[~np.isnan(possible_combinations_all_fish)]))[0][0]
comp_fish = np.where(possible_combinations_all_fish == np.min(possible_combinations_all_fish[~np.isnan(possible_combinations_all_fish)]))[1][0]
nan_test2 = fishes[fish] + fishes[comp_fish]
if len(nan_test2[~np.isnan(nan_test2)]) >= 20:
possible_combinations_all_fish[fish][comp_fish] = np.nan
if np.size(possible_combinations_all_fish[~np.isnan(possible_combinations_all_fish)]) == 0:
combining_finished = True
continue
fishes[comp_fish][~np.isnan(fishes[fish])] = fishes[fish][~np.isnan(fishes[fish])]
fishes[fish] = np.full(len(fishes[fish]), np.nan)
# clean up possible_combination all fish
for i in range(len(possible_combinations_all_fish)):
if not np.isnan(possible_combinations_all_fish[i][fish]):
if np.isnan(possible_combinations_all_fish[i][comp_fish]):
possible_combinations_all_fish[i][comp_fish] = possible_combinations_all_fish[i][fish]
possible_combinations_all_fish[i][fish] = np.nan
elif possible_combinations_all_fish[i][fish] < possible_combinations_all_fish[i][comp_fish]:
possible_combinations_all_fish[i][comp_fish] = possible_combinations_all_fish[i][fish]
possible_combinations_all_fish[i][fish] = np.nan
else:
possible_combinations_all_fish[i][fish] = np.nan
possible_combinations_all_fish[fish] = np.full(len(possible_combinations_all_fish[fish]), np.nan)
if all_rises[fish] != []:
for rise in range(len(all_rises[fish])):
all_rises[comp_fish].append(all_rises[fish][rise])
all_rises[fish] = []
if np.size(possible_combinations_all_fish[~np.isnan(possible_combinations_all_fish)]) == 0:
combining_finished = True
# for i in range(len(fishes)):
# ax.plot(np.arange(len(fishes[i]))[~np.isnan(fishes[i])], fishes[i][~np.isnan(fishes[i])], color= np.random.rand(3, 1), marker='.')
# plt.show()
for fish in reversed(range(len(fishes))):
if len(fishes[fish][~np.isnan(fishes[fish])]) == 0:
delete_idx.append(fish)
all_rises.pop(fish)
return_idxs = np.setdiff1d(np.arange(len(fishes)), np.array(delete_idx))
return fishes[return_idxs], all_rises
def exclude_fishes(fishes, all_times, min_occure_time = 1.):
"""
    Delete fishes that are present for too short a period of time.
:param fishes: (list) containing arrays of sorted fish frequencies. Each array represents one fish.
:param all_times: (array) containing time stamps of frequency detection. ( len(all_times) == len(fishes[xy]) )
    :param min_occure_time: (float) minimum duration in minutes a fish has to be present to not get excluded.
:return fishes: (array) containing arrays of sorted fish frequencies. Each array represents one fish.
"""
keep_idx = []
detection_time_diff = all_times[1] - all_times[0]
dpm = 60. / detection_time_diff # detections per minute
for fish in range(len(fishes)):
if len(fishes[fish][~np.isnan(fishes[fish])]) >= min_occure_time * dpm:
keep_idx.append(fish)
return np.asarray(fishes)[keep_idx]
def cut_at_rises(fishes, all_rises):
"""
Cuts fish arrays at detected rise peaks. For each rise two fish arrays are created with the same length as the
original fish array.
This step is necessary because of wrong detections resulting from rises of fishes.
:param fishes: (array) containing arrays of sorted fish frequencies. Each array represents one fish.
    :param all_rises: (list) rise data of every fish as returned by detect_rises().
    :return: (array, list) the cut fish frequency arrays (one array per fish) and the correspondingly updated all_rises list.
"""
new_fishes = []
delete_idx = []
for fish in reversed(range(len(fishes))):
for rise in reversed(range(len(all_rises[fish]))):
cut_idx = all_rises[fish][rise][0][0]
new_fishes.append(np.full(len(fishes[fish]), np.nan))
new_fishes[-1][cut_idx:] = fishes[fish][cut_idx:]
fishes[fish][cut_idx:] = np.full(len(fishes[fish][cut_idx:]), np.nan)
all_rises.append([all_rises[fish][rise]])
all_rises[fish].pop(rise)
for fish in reversed(range(len(fishes))):
if len(fishes[fish][~np.isnan(fishes[fish])]) <= 10:
delete_idx.append(fish)
all_rises.pop(fish)
return_idx = np.setdiff1d(np.arange(len(fishes)), np.array(delete_idx))
if len(new_fishes) == 0:
return fishes, all_rises
else:
return np.append(fishes[return_idx], new_fishes, axis=0), all_rises
# return np.append(fishes[return_idx], new_fishes, axis=0), all_rises
def save_data(fishes, all_times, all_rises, base_name, output_folder):
np.save(os.path.join(output_folder, base_name) + '-final_fishes.npy', np.asarray(fishes))
np.save(os.path.join(output_folder, base_name) + '-final_times.npy', all_times)
np.save(os.path.join(output_folder, base_name) + '-final_rises.npy', np.asarray(all_rises))
def plot_fishes(fishes, all_times, all_rises, base_name, save_plot, output_folder):
"""
    Plots the detected fish fundamental frequencies against time (in seconds, minutes, or hours, depending on the recording duration).
:param fishes: (list) containing arrays of sorted fish frequencies. Each array represents one fish.
:param all_times: (array) containing time stamps of frequency detection. ( len(all_times) == len(fishes[xy]) )
"""
fig, ax = plt.subplots(facecolor='white', figsize=(11.6, 8.2))
if all_times[-1] <= 120:
time_factor = 1.
elif all_times[-1] > 120 and all_times[-1] < 7200:
time_factor = 60.
else:
time_factor = 3600.
for fish in range(len(fishes)):
color = np.random.rand(3, 1)
ax.plot(all_times[~np.isnan(fishes[fish])] / time_factor, fishes[fish][~np.isnan(fishes[fish])], color=color, marker='.')
legend_in = False
for fish in range(len(all_rises)):
for rise in all_rises[fish]:
if rise[1][0] - rise[1][1] > 1.5:
if legend_in == False:
ax.plot(all_times[rise[0][0]] / time_factor, rise[1][0], 'o', color='red', markersize= 7,
markerfacecolor='None', label='rise begin')
ax.plot(all_times[rise[0][1]] / time_factor, rise[1][1], 's', color='green', markersize= 7,
markerfacecolor='None', label='rise end')
legend_in = True
plt.legend(loc=1, numpoints=1, frameon=False, fontsize = 12)
else:
ax.plot(all_times[rise[0][0]] / time_factor, rise[1][0], 'o', color='red', markersize=7,
markerfacecolor='None')
ax.plot(all_times[rise[0][1]] / time_factor, rise[1][1], 's', color='green', markersize=7,
markerfacecolor='None')
maxy = np.max(np.array([np.mean(fishes[fish][~np.isnan(fishes[fish])]) for fish in range(len(fishes))]))
miny = np.min(np.array([np.mean(fishes[fish][~np.isnan(fishes[fish])]) for fish in range(len(fishes))]))
plt.ylim([miny-150, maxy+150])
plt.ylabel('Frequency [Hz]', fontsize=14)
if time_factor == 1.:
plt.xlabel('Time [sec]', fontsize=14)
elif time_factor == 60.:
plt.xlabel('Time [min]', fontsize=14)
else:
plt.xlabel('Time [h]', fontsize=14)
plt.title(base_name, fontsize=16)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
if save_plot:
plt.savefig(os.path.join(output_folder, base_name))
plt.close(fig)
else:
plt.show()
def add_tracker_config(cfg, data_snipped_secs = 60., nffts_per_psd = 4, fresolution = 0.5, overlap_frac = .9,
freq_tolerance = 0.5, rise_f_th = 0.5, prim_time_tolerance = 5., max_time_tolerance = 10., f_th=5.):
""" Add parameter needed for fish_tracker() as
a new section to a configuration.
Parameters
----------
cfg: ConfigFile
the configuration
data_snipped_secs: float
        duration of the data snippet processed at once in seconds.
nffts_per_psd: int
nffts used for powerspectrum analysis.
fresolution: float
        frequency resolution of the spectrogram.
overlap_frac: float
overlap fraction of nffts for powerspectrum analysis.
freq_tolerance: float
        frequency tolerance of the first fish sorting step.
rise_f_th: float
minimum frequency difference between peak and base of a rise to be detected as such.
prim_time_tolerance: float
        maximum time difference in minutes in the first fish sorting step.
max_time_tolerance: float
maximum time difference in minutes between two fishes to combine these.
f_th: float
maximum frequency difference between two fishes to combine these.
"""
cfg.add_section('Fish tracking:')
    cfg.add('DataSnippedSize', data_snipped_secs, 's', 'Duration of the data snippet processed at once in seconds.')
cfg.add('NfftPerPsd', nffts_per_psd, '', 'Number of nffts used for powerspectrum analysis.')
cfg.add('FreqResolution', fresolution, 'Hz', 'Frequency resolution of the spectrogram')
cfg.add('OverlapFrac', overlap_frac, '', 'Overlap fraction of the nffts during Powerspectrum analysis')
    cfg.add('FreqTolerance', freq_tolerance, 'Hz', 'Frequency tolerance in the first fish sorting step.')
cfg.add('RiseFreqTh', rise_f_th, 'Hz', 'Frequency threshold for the primary rise detection.')
cfg.add('PrimTimeTolerance', prim_time_tolerance, 'min', 'Time tolerance in the first fish sorting step.')
    cfg.add('MaxTimeTolerance', max_time_tolerance, 'min', 'Time tolerance between the occurrence of two fishes to join them.')
cfg.add('FrequencyThreshold', f_th, 'Hz', 'Maximum Frequency difference between two fishes to join them.')
def tracker_args(cfg):
""" Translates a configuration to the
respective parameter names of the function fish_tracker().
The return value can then be passed as key-word arguments to this function.
Parameters
----------
cfg: ConfigFile
the configuration
    Returns
-------
dict
dictionary with names of arguments of the fish_tracker() function and their values as supplied by cfg.
"""
return cfg.map({'data_snipped_secs': 'DataSnippedSize',
'nffts_per_psd': 'NfftPerPsd',
'fresolution': 'FreqResolution',
'overlap_frac': 'OverlapFrac',
'freq_tolerance': 'FreqTolerance',
'rise_f_th': 'RiseFreqTh',
'prim_time_tolerance': 'PrimTimeTolerance',
'max_time_tolerance': 'MaxTimeTolerance',
'f_th': 'FrequencyThreshold'})
def fish_tracker(data_file, start_time=0.0, end_time=-1.0, gridfile=False, save_plot=False,
save_original_fishes=False, data_snippet_secs = 60., nffts_per_psd = 4, fresolution = 0.5,
overlap_frac =.9, freq_tolerance = 0.5, rise_f_th= .5, max_time_tolerance = 10.,
f_th= 5., output_folder = '.', plot_harmonic_groups=False, verbose=0, **kwargs):
"""
Performs the steps to analyse long-term recordings of wave-type weakly electric fish including frequency analysis,
fish tracking and more.
    Spectrograms and power spectra are calculated for small data snippets. From the power spectra, harmonic
    groups and therefore the fundamental frequencies of the electric fish are detected. These fundamental
    frequencies are detected for every time step throughout the whole file. Afterwards the fundamental
    frequencies get assigned to different fishes.
:param data_file: (string) filepath of the analysed data file.
    :param data_snippet_secs: (float) duration of the data snippet processed at once in seconds. Necessary because of memory issues.
:param nffts_per_psd: (int) amount of nffts used to calculate one psd.
    :param start_time: (float) analyze data from this time on (in seconds).
    :param end_time: (float) stop analysis at this time (in seconds).
    :param save_plot: (boolean) if True saves a plot of the sorted fishes instead of showing it.
:param save_original_fishes: (boolean) if True saves the sorted fishes after the first level of fish sorting.
:param kwargs: further arguments are passed on to harmonic_groups().
"""
if gridfile:
data = open_data(data_file, -1, 60.0, 10.0)
print('\n--- GRID FILE ANALYSIS ---')
print('ALL traces are analysed')
print('--------------------------')
else:
data = open_data(data_file, 0, 60.0, 10.0)
print('\n--- ONE TRACE ANALYSIS ---')
print('ONLY 1 trace is analysed')
print('--------------------------')
# with open_data(data_file, 0, 60.0, 10.0) as data:
samplerate = data.samplerate
base_name = os.path.splitext(os.path.basename(data_file))[0]
if verbose >= 1:
print('\nextract fundamentals...')
if verbose >= 2:
print('> frequency resolution = %.2f Hz' % fresolution)
print('> nfft overlap fraction = %.2f' % overlap_frac)
all_fundamentals, all_times = extract_fundamentals(data, samplerate, start_time, end_time,
data_snippet_secs, nffts_per_psd,
fresolution=fresolution,
overlap_frac=overlap_frac,
plot_harmonic_groups=plot_harmonic_groups,
verbose=verbose, **kwargs)
if verbose >= 1:
print('\nsorting fishes...')
if verbose >= 2:
print('> frequency tolerance = %.2f Hz' % freq_tolerance)
fishes = first_level_fish_sorting(all_fundamentals, base_name, all_times, freq_tolerance=freq_tolerance,
save_original_fishes=save_original_fishes, output_folder=output_folder, verbose=verbose)
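    # keep only fishes present for at least 1% of the recording time, capped at 1 minute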
min_occure_time = all_times[-1] * 0.01 / 60.
if min_occure_time > 1.:
min_occure_time = 1.
if verbose >= 1:
print('\nexclude fishes...')
if verbose >= 2:
print('> minimum occur time: %.2f min' % min_occure_time)
fishes = exclude_fishes(fishes, all_times, min_occure_time)
if len(fishes) == 0:
print('excluded all fishes. Change parameters.')
quit()
if verbose >= 1:
print('\nrise detection...')
if verbose >= 2:
print('> rise frequency th = %.2f Hz' % rise_f_th)
all_rises = detect_rises(fishes, all_times, rise_f_th, verbose=verbose)
if verbose >= 1:
print('\ncut fishes at rises...')
fishes, all_rises = cut_at_rises(fishes, all_rises)
if verbose >= 1:
print('\ncombining fishes...')
if verbose >= 2:
print('> maximum time difference: %.2f min' % max_time_tolerance)
print('> maximum frequency difference: %.2f Hz' % f_th)
fishes, all_rises = combine_fishes(fishes, all_times, all_rises, max_time_tolerance, f_th)
if verbose >= 1:
print('%.0f fishes left' % len(fishes))
if 'plt' in locals() or 'plt' in globals():
plot_fishes(fishes, all_times, all_rises, base_name, save_plot, output_folder)
if save_original_fishes:
if verbose >= 1:
print('saving data to ' + output_folder)
save_data(fishes, all_times, all_rises, base_name, output_folder)
if verbose >= 1:
print('\nWhole file processed.')
def main():
# config file name:
cfgfile = __package__ + '.cfg'
# command line arguments:
parser = argparse.ArgumentParser(
description='Analyse long single- or multi electrode EOD recordings of weakly electric fish.',
epilog='by bendalab (2015-2017)')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('-v', action='count', dest='verbose', help='verbosity level')
parser.add_argument('-c', '--save-config', nargs='?', default='', const=cfgfile,
type=str, metavar='cfgfile',
help='save configuration to file cfgfile (defaults to {0})'.format(cfgfile))
    parser.add_argument('file', nargs='?', default='', type=str, help='name of the file with the time series data or the -fishes.npy file saved with the -s option')
parser.add_argument('start_time', nargs='?', default=0.0, type=float, help='start time of analysis in min.')
parser.add_argument('end_time', nargs='?', default=-1.0, type=float, help='end time of analysis in min.')
parser.add_argument('-g', dest='grid', action='store_true', help='sum up spectrograms of all channels available.')
parser.add_argument('-p', dest='save_plot', action='store_true', help='save output plot as png file')
parser.add_argument('-s', dest='save_fish', action='store_true',
help='save fish EODs after first stage of sorting.')
parser.add_argument('-f', dest='plot_harmonic_groups', action='store_true', help='plot harmonic group detection')
parser.add_argument('-o', dest='output_folder', default=".", type=str,
help="path where to store results and figures")
args = parser.parse_args()
datafile = args.file
# set verbosity level from command line:
verbose = 0
if args.verbose != None:
verbose = args.verbose
# configuration options:
cfg = ConfigFile()
add_psd_peak_detection_config(cfg)
add_harmonic_groups_config(cfg)
add_tracker_config(cfg)
# load configuration from working directory and data directories:
cfg.load_files(cfgfile, datafile, 3, verbose)
# save configuration:
if len(args.save_config) > 0:
ext = os.path.splitext(args.save_config)[1]
if ext != os.extsep + 'cfg':
print('configuration file name must have .cfg as extension!')
else:
print('write configuration to %s ...' % args.save_config)
cfg.dump(args.save_config)
return
# check data file:
if len(datafile) == 0:
parser.error('you need to specify a file containing some data')
return
# output directory:
if not os.path.exists(args.output_folder):
os.makedirs(args.output_folder)
if os.path.splitext(datafile)[1] == '.npy':
rise_f_th = .5
max_time_tolerance = 10.
f_th = 5.
output_folder = args.output_folder
a = np.load(sys.argv[1], mmap_mode='r+')
fishes = a.copy()
all_times = np.load(sys.argv[1].replace('-fishes', '-times'))
min_occure_time = all_times[-1] * 0.01 / 60.
if min_occure_time > 1.:
min_occure_time = 1.
if verbose >= 1:
print('\nexclude fishes...')
if verbose >= 2:
print('> minimum occur time: %.2f min' % min_occure_time)
fishes = exclude_fishes(fishes, all_times, min_occure_time=min_occure_time)
if verbose >= 1:
print('\nrise detection...')
if verbose >= 2:
print('> rise frequency th = %.2f Hz' % rise_f_th)
all_rises = detect_rises(fishes, all_times, rise_f_th, verbose)
if verbose >= 1:
print('\ncut fishes at rises...')
fishes, all_rises = cut_at_rises(fishes, all_rises)
if verbose >= 1:
print('\ncombining fishes...')
if verbose >= 2:
print('> maximum time difference: %.2f min' % max_time_tolerance)
print('> maximum frequency difference: %.2f Hz' % f_th)
fishes, all_rises = combine_fishes(fishes, all_times, all_rises, max_time_tolerance, f_th)
if verbose >= 1:
print('%.0f fishes left' % len(fishes))
base_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
if 'plt' in locals() or 'plt' in globals():
plot_fishes(fishes, all_times, all_rises, base_name, args.save_plot, args.output_folder)
if args.save_fish:
if verbose >= 1:
print('saving data to ' + output_folder)
save_data(fishes, all_times, all_rises, base_name, output_folder)
if verbose >= 1:
print('Whole file processed.')
else:
t_kwargs = psd_peak_detection_args(cfg)
t_kwargs.update(harmonic_groups_args(cfg))
t_kwargs.update(tracker_args(cfg))
fish_tracker(datafile, args.start_time*60.0, args.end_time*60.0,
args.grid, args.save_plot, args.save_fish, output_folder=args.output_folder,
plot_harmonic_groups=args.plot_harmonic_groups, verbose=verbose, **t_kwargs)
if __name__ == '__main__':
main()
| gpl-3.0 |
deepchem/deepchem | examples/tox21/tox21_sklearn_models.py | 6 | 1160 | """
Script that trains sklearn models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
from deepchem.molnet import load_tox21
from sklearn.ensemble import RandomForestClassifier
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = load_tox21()
(train_dataset, valid_dataset, test_dataset) = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500)
return dc.models.SklearnModel(sklearn_model, model_dir)
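# Wrap the per-task random forest builder into a single multitask model (one forest per Tox21 task)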
model = dc.models.SingletaskToMultitask(tox21_tasks, model_builder)
# Fit trained model
print("About to fit model")
model.fit(train_dataset)
model.save()
print("About to evaluate model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| mit |
urinieto/similarity_classification | code/compute_dtw.py | 1 | 13207 | #!/usr/bin/env python
"""
Computes DTW scores, paths, thresholds, and f-measures between annotated segments for multiple music structure datasets.
"""
import argparse
import cPickle as pickle
import dtw
import matplotlib.pyplot as plt
import librosa
import logging
import mir_eval
import numpy as np
import os
import pandas as pd
import scipy
import sklearn
import time
from joblib import Parallel, delayed
import msaf
from msaf import jams2
# Directory to store the features
features_dir = "../features_beats"
# Distances to use for the DTW
dist_dict = {
"L1": scipy.spatial.distance.cityblock,
"L2": scipy.spatial.distance.euclidean,
"correlation": scipy.spatial.distance.correlation
}
# Normalization techniques for the threshold and f-measure computation
norms = ["none", "min", "max", "hmean"]
def compute_threshold(intervals=None, labels=None, scores=None, norm=None):
"""Computes the thresholds for the given inputs.
Parameters
----------
intervals : np.array
Estimated segment boundary intervals.
labels : np.array
Estimated segment labels.
scores : np.array
DTW scores.
norm : str
Normalization method.
Returns
-------
thr : float > 0
Threshold for which to optimally cut the DTW score matrix.
fmeasure : float > 0
F-measure resulting using threshold.
"""
label_agreement = np.zeros((len(labels), len(labels)), dtype=bool)
for i in range(len(labels)):
for j in range(i, len(labels)):
label_agreement[i, j] = (labels[i] == labels[j])
label_agreement[j, i] = label_agreement[i, j]
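    # normalize DTW scores by segment duration (min, max, or harmonic mean of each pair of durations)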
time_norm = 1
durations = np.diff(intervals, axis=1).ravel()
if norm == 'min':
time_norm = np.minimum.outer(durations, durations)
elif norm == 'max':
time_norm = np.maximum.outer(durations, durations)
elif norm == 'hmean':
time_norm = 2./np.add.outer(durations, durations)
time_norm *= np.multiply.outer(durations, durations)
# TODO: have the label agreement index out nan-valued scores
scores = scores / time_norm
label_agreement[np.tril_indices_from(label_agreement, k=0)] = False
label_agreement[~np.isfinite(scores)] = False
label_disagreement = ~label_agreement
label_disagreement[np.tril_indices_from(label_disagreement, k=0)] = False
label_disagreement[~np.isfinite(scores)] = False
tp_scores = scores[label_agreement]
fp_scores = scores[label_disagreement]
num_pos = np.sum(label_agreement)
num_neg = np.sum(label_disagreement)
if num_pos == 0 or num_neg == 0:
return 0.0, 0.0
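    # sweep the ROC operating points and keep the threshold that maximizes the f-measure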
y_true = np.concatenate([np.zeros(len(tp_scores)), np.ones(len(fp_scores))])
y_score = np.concatenate([tp_scores, fp_scores])
fpr, tpr, thr = sklearn.metrics.roc_curve(y_true, y_score)
tp = num_pos * tpr
fp = num_neg * fpr
precision = tp / (tp + fp)
recall = tpr
fmeasure = np.asarray([mir_eval.util.f_measure(p, r)
for p, r in zip(precision, recall)])
k = np.argmax(fmeasure)
return thr[k], fmeasure[k]
def read_features(features_file):
"""Reads the features from the pickle file.
Parameters
----------
features_file : str
Path to the features file.
Returns
-------
cqgram : np.array
Subseg-sync constant-Q power spectrogram.
intframes : np.array
        The frame indices.
"""
with open(features_file, "r") as f:
features = pickle.load(f)
return features["cqgram"], features["intframes"]
def save_features(cqgram, intframes, subseg, features_file):
"""Reads the features from the pickle file.
Parameters
----------
cqgram : np.array
Subseg-sync constant-Q power spectrogram.
intframes : np.array
The frame indeces.
subseg : np.array
Subseq-index times.
features_file : str
Path to the output features file.
"""
features = {}
features["cqgram"] = cqgram
features["intframes"] = intframes
features["subseg"] = subseg
with open(features_file, "w") as f:
pickle.dump(features, f, protocol=-1)
def compute_features(audio_file, intervals, level):
"""Computes the subseg-sync cqt features from the given audio file, if
    they have not been previously computed. Saves the results in the features_dir folder.
Parameters
----------
audio_file : str
Path to the audio file.
intervals : np.array
Intervals containing the estimated boundaries.
level : str
Level in the hierarchy.
Returns
-------
cqgram : np.array
Subseg-sync constant-Q power spectrogram.
intframes : np.array
        The frame indices.
"""
# Check if features have already been computed
if level == "small_scale":
features_file = os.path.join(features_dir, os.path.basename(audio_file).split('.')[0] +
"_small_scale.mp3.pk")
else:
features_file = os.path.join(features_dir, os.path.basename(audio_file) +
".pk")
if os.path.isfile(features_file):
return read_features(features_file)
y, sr = librosa.load(audio_file, sr=11025)
# Default hopsize is 512
hopsize = 512
cqgram = librosa.logamplitude(librosa.cqt(y, sr=sr, hop_length=hopsize)**2, ref_power=np.max)
# Track beats
y_harmonic, y_percussive = librosa.effects.hpss(y)
tempo, beats = librosa.beat.beat_track(y=y_percussive, sr=sr,
hop_length=hopsize)
# Synchronize
cqgram = librosa.feature.sync(cqgram, beats, aggregate=np.median)
intframes = None
if intervals is not None:
# convert intervals to frames
intframes = librosa.time_to_frames(intervals, sr=sr, hop_length=hopsize)
# Match intervals to subseg points
intframes = librosa.util.match_events(intframes, beats)
# Save the features
save_features(cqgram, intframes, beats, features_file)
return cqgram, intframes
def make_cost_matrix(audio_file, intervals, labels, dist, level):
"""Computes the cost matrix of the DTW from the given audio file.
Parameters
----------
audio_file : str
Path to the audio file.
intervals : np.array
Intervals containing the estimated boundaries.
labels : np.array
Estimated segment labels.
dist : fun
Distance function to be used for the DTW
level : str
Level in the hierarchy.
Returns
-------
D : np.array
DTW scores.
P : list
List containing np.arrays() representing the DTW paths.
"""
# Computes the features (return existing ones if already computed)
cqgram, intframes = compute_features(audio_file, intervals, level)
# Score matrix
D = np.nan * np.zeros((len(labels), len(labels)), dtype=np.float32)
np.fill_diagonal(D, 0)
# Path matrix
P = []
for i in range(len(labels)):
P.append([np.nan] * len(labels))
for i in range(len(labels)):
P[i][i] = 0
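    # run DTW between every pair of segments; segments shorter than 2 frames are skipped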
for i in range(len(labels)):
x_slice = cqgram[:, intframes[i, 0]:intframes[i, 1]].T
if intframes[i, 1] - intframes[i, 0] < 2:
continue
for j in range(i+1, len(labels)):
if intframes[j, 1] - intframes[j, 0] < 2:
continue
y_slice = cqgram[:, intframes[j, 0]:intframes[j, 1]].T
dtw_cost, distance, path = dtw.dtw(x_slice, y_slice, dist=dist)
D[i, j] = dtw_cost
D[j, i] = D[i, j]
path = list(path)
path[0] = np.asarray(path[0], dtype=np.int32)
path[1] = np.asarray(path[1], dtype=np.int32)
P[i][j] = path
return D, P
def compute_score(file_struct, level, dist_key):
"""Computes the DTW scores for the given file.
Parameters
----------
file_struct : FileStruct (msaf)
Object containing the struct.
level : str
Level of the hierarchy to be considered.
dist_key : str
Distance measure identifier.
Returns
-------
ret : dict
Dictionary with the results, including the following keys:
intervals : reference boundary intervals,
labels : reference segment labels,
scores : DTW scores,
paths : DTW paths,
thresholds : thresholds found for the different normalizations,
fmeasures : fmeasures computes for the different normalizations,
file_name : name of the file
"""
try:
ref_inter, ref_labels = jams2.converters.load_jams_range(
file_struct.ref_file, "sections", annotator=0, context=level)
assert len(ref_labels) > 0
D, P = make_cost_matrix(file_struct.audio_file, ref_inter, ref_labels,
dist=dist_dict[dist_key], level=level)
thresholds = {}
fmeasures = {}
for norm in norms:
thresholds[norm], fmeasures[norm] = compute_threshold(
intervals=ref_inter, labels=ref_labels, scores=D, norm=norm)
except IndexError as e:
logging.warning("warning: problem computing threshold %s at level %s" %
(file_struct.audio_file, level))
ref_inter = None
ref_labels = None
D = None
P = None
thresholds = None
fmeasures = None
except (AssertionError, IOError) as e:
logging.warning("warning: no annotations for %s" %
file_struct.audio_file)
ref_inter = None
ref_labels = None
D = None
P = None
thresholds = None
fmeasures = None
finally:
cqgram, intframes = compute_features(file_struct.audio_file, None, level)
ret = {
"intervals": ref_inter,
"labels": ref_labels,
"scores": D,
"paths": P,
"thresholds": thresholds,
"fmeasures": fmeasures,
"file_name": os.path.basename(file_struct.audio_file)
}
return ret
def save_results(dataset, level, dist_key, scores):
"""Saves the results.
Parameters
----------
dataset : str
Name of the dataset.
level : str
Level of dataset being considered.
dist_key : str
Type of distance
scores : dict
Dictionary containing the scores for all the files in the dataset.
"""
result = {
"level": level,
"dist": dist_key,
"file_scores": scores
}
out_file = "scores_datasetE%s_levelE%s_distE%s.pk" % (dataset, level,
dist_key)
with open(out_file, "w") as f:
pickle.dump(result, f, protocol=-1)
def main(ds_path, n_jobs):
"""Main function to compute DTW scores for a given root dataset and
number of processors.
Parameters
----------
ds_path : str
Path to the root of the dataset.
n_jobs : int > 0
Number of processes to use.
"""
# Datasets from which to compute the DTWs
datasets = ["SALAMI", "Isophonics"]
# Different levels for the datasets
dataset_levels = {
"Isophonics": ["function"],
#"SALAMI": ["function", "large_scale", "small_scale"]
#"SALAMI": ["function", "large_scale"]
"SALAMI": ["function"]
}
# Make sure the features folder exists
msaf.utils.ensure_dir(features_dir)
# Main loop
for dataset in datasets:
# Obtain all the files for the given dataset
files = msaf.io.get_dataset_files(ds_path, ds_name=dataset)
# Compute results for the specific level and distance
for level in dataset_levels[dataset]:
for dist_key in dist_dict.keys():
if dataset != "SALAMI" and level != "function":
continue
logging.info("Computing: %s, %s, %s" %
(dataset, level, dist_key))
# Compute scores using multiple cpus
scores = Parallel(n_jobs=n_jobs)(delayed(compute_score)(
file_struct, level, dist_key)
for file_struct in files[:])
# Save all results
save_results(dataset, level, dist_key, scores)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Computes the DTW scores, paths, thresholds, and f-measures"
" for multiple collections.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("ds_path",
action="store",
help="Input path to dataset")
parser.add_argument("-j",
action="store",
dest="n_jobs",
default=1,
type=int,
help="The number of threads to use")
args = parser.parse_args()
start_time = time.time()
# Setup the logger
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
level=logging.INFO)
# Call main function
main(args.ds_path, args.n_jobs)
# Done!
logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
| mit |
StupidTortoise/personal | python/temp.py | 1 | 1396 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sklearn
import seaborn as sb
iris_data = pd.read_csv('iris-data.csv', na_values = ['NA'])
iris_data.loc[iris_data['class'] == 'versicolor', 'class'] = 'Iris-versicolor'
iris_data.loc[iris_data['class'] == 'Iris-setossa', 'class'] = 'Iris-setosa'
# This line drops any 'Iris-setosa' rows with a sepal width less than 2.5 cm
iris_data = iris_data.loc[(iris_data['class'] != 'Iris-setosa') | (iris_data['sepal_width_cm'] >= 2.5)]
iris_data.loc[iris_data['class'] == 'Iris-setosa', 'sepal_width_cm'].hist()
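# some versicolor sepal lengths appear to be recorded in meters; convert values below 1.0 to centimeters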
iris_data.loc[(iris_data['class'] == 'Iris-versicolor') & (iris_data['sepal_length_cm'] < 1.0), 'sepal_length_cm'] *= 100.0
iris_data.loc[iris_data['class'] == 'Iris-versicolor', 'sepal_length_cm'].hist()
# iris_data.loc[(iris_data['sepal_length_cm'].isnull()) | (iris_data['sepal_width_cm'].isnull()) | (iris_data['petal_length_cm'].isnull()) | (iris_data['petal_width_cm'].isnull())]
average_petal_width = iris_data.loc[iris_data['class'] == 'Iris-setosa', 'petal_width_cm'].mean()
iris_data.loc[(iris_data['class'] == 'Iris-setosa') & (iris_data['petal_width_cm'].isnull()), 'petal_width_cm'] = average_petal_width
iris_data.loc[(iris_data['class'] == 'Iris-setosa') & (iris_data['petal_width_cm'] == average_petal_width)]
iris_data_clean = pd.read_csv('iris-data-clean.csv')
| gpl-2.0 |
jblackburne/scikit-learn | examples/calibration/plot_calibration.py | 66 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see https://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/tools/tests/test_tile.py | 9 | 8927 | import os
import nose
import numpy as np
from pandas.compat import zip
from pandas import DataFrame, Series, unique
import pandas.util.testing as tm
from pandas.util.testing import assertRaisesRegexp
import pandas.core.common as com
from pandas.core.algorithms import quantile
from pandas.tools.tile import cut, qcut
import pandas.tools.tile as tmod
class TestCut(tm.TestCase):
def test_simple(self):
data = np.ones(5)
result = cut(data, 4, labels=False)
desired = [1, 1, 1, 1, 1]
tm.assert_numpy_array_equal(result, desired)
def test_bins(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 1, 2, 0])
tm.assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])
def test_right(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 2, 3, 0, 0])
tm.assert_almost_equal(bins, [0.1905, 2.575, 4.95, 7.325, 9.7])
def test_noright(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 2, 3, 0, 1])
tm.assert_almost_equal(bins, [0.2, 2.575, 4.95, 7.325, 9.7095])
def test_arraylike(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
tm.assert_numpy_array_equal(result.codes, [0, 0, 0, 1, 2, 0])
tm.assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])
def test_bins_not_monotonic(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
self.assertRaises(ValueError, cut, data, [0.1, 1.5, 1, 10])
def test_wrong_num_labels(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
self.assertRaises(ValueError, cut, data, [0, 1, 10],
labels=['foo', 'bar', 'baz'])
def test_cut_corner(self):
# h3h
self.assertRaises(ValueError, cut, [], 2)
self.assertRaises(ValueError, cut, [1, 2, 3], 0.5)
def test_cut_out_of_range_more(self):
# #1511
s = Series([0, -1, 0, 1, -3])
ind = cut(s, [0, 1], labels=False)
exp = [np.nan, np.nan, np.nan, 0, np.nan]
tm.assert_almost_equal(ind, exp)
def test_labels(self):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True)
ex_levels = ['(-0.001, 0.25]', '(0.25, 0.5]', '(0.5, 0.75]',
'(0.75, 1]']
self.assert_numpy_array_equal(result.categories, ex_levels)
result, bins = cut(arr, 4, retbins=True, right=False)
ex_levels = ['[0, 0.25)', '[0.25, 0.5)', '[0.5, 0.75)',
'[0.75, 1.001)']
self.assert_numpy_array_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor(self):
s = Series(np.random.randn(100), name='foo')
factor = cut(s, 4)
self.assertEqual(factor.name, 'foo')
def test_label_precision(self):
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = ['(-0.00072, 0.18]', '(0.18, 0.36]', '(0.36, 0.54]',
'(0.54, 0.72]']
self.assert_numpy_array_equal(result.categories, ex_levels)
def test_na_handling(self):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4)
result_arr = np.asarray(result)
ex_arr = np.where(com.isnull(arr), np.nan, result_arr)
tm.assert_almost_equal(result_arr, ex_arr)
result = cut(arr, 4, labels=False)
ex_result = np.where(com.isnull(arr), np.nan, result)
tm.assert_almost_equal(result, ex_result)
def test_inf_handling(self):
data = np.arange(6)
data_ser = Series(data,dtype='int64')
result = cut(data, [-np.inf, 2, 4, np.inf])
result_ser = cut(data_ser, [-np.inf, 2, 4, np.inf])
ex_categories = ['(-inf, 2]', '(2, 4]', '(4, inf]']
tm.assert_numpy_array_equal(result.categories, ex_categories)
tm.assert_numpy_array_equal(result_ser.cat.categories, ex_categories)
self.assertEqual(result[5], '(4, inf]')
self.assertEqual(result[0], '(-inf, 2]')
self.assertEqual(result_ser[5], '(4, inf]')
self.assertEqual(result_ser[0], '(-inf, 2]')
def test_qcut(self):
arr = np.random.randn(1000)
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(bins, ex_bins)
ex_levels = cut(arr, ex_bins, include_lowest=True)
self.assert_numpy_array_equal(labels, ex_levels)
def test_qcut_bounds(self):
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
self.assertEqual(len(np.unique(factor)), 10)
def test_qcut_specify_quantiles(self):
arr = np.random.randn(100)
factor = qcut(arr, [0, .25, .5, .75, 1.])
expected = qcut(arr, 4)
self.assertTrue(factor.equals(expected))
def test_qcut_all_bins_same(self):
assertRaisesRegexp(ValueError, "edges.*unique", qcut, [0,0,0,0,0,0,0,0,0,0], 3)
def test_cut_out_of_bounds(self):
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = result.codes == -1
ex_mask = (arr < -1) | (arr > 1)
self.assert_numpy_array_equal(mask, ex_mask)
def test_cut_pass_labels(self):
arr = [50, 5, 10, 15, 20, 30, 70]
bins = [0, 25, 50, 100]
labels = ['Small', 'Medium', 'Large']
result = cut(arr, bins, labels=labels)
exp = cut(arr, bins)
exp.categories = labels
self.assertTrue(result.equals(exp))
def test_qcut_include_lowest(self):
values = np.arange(10)
cats = qcut(values, 4)
ex_levels = ['[0, 2.25]', '(2.25, 4.5]', '(4.5, 6.75]', '(6.75, 9]']
self.assertTrue((cats.categories == ex_levels).all())
def test_qcut_nas(self):
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
self.assertTrue(com.isnull(result[:20]).all())
def test_label_formatting(self):
self.assertEqual(tmod._trim_zeros('1.000'), '1')
# it works
result = cut(np.arange(11.), 2)
result = cut(np.arange(11.) / 1e10, 2)
# #1979, negative numbers
result = tmod._format_label(-117.9998, precision=3)
self.assertEqual(result, '-118')
result = tmod._format_label(117.9998, precision=3)
self.assertEqual(result, '118')
def test_qcut_binning_issues(self):
# #1978, 1979
path = os.path.join(curpath(), 'cut_data.csv')
arr = np.loadtxt(path)
result = qcut(arr, 20)
starts = []
ends = []
for lev in result.categories:
s, e = lev[1:-1].split(',')
self.assertTrue(s != e)
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
zip(ends[:-1], ends[1:])):
self.assertTrue(sp < sn)
self.assertTrue(ep < en)
self.assertTrue(ep <= sn)
def test_cut_return_categorical(self):
from pandas import Categorical
s = Series([0,1,2,3,4,5,6,7,8])
res = cut(s,3)
exp = Series(Categorical.from_codes([0,0,0,1,1,1,2,2,2],
["(-0.008, 2.667]", "(2.667, 5.333]", "(5.333, 8]"],
ordered=True))
tm.assert_series_equal(res, exp)
def test_qcut_return_categorical(self):
from pandas import Categorical
s = Series([0,1,2,3,4,5,6,7,8])
res = qcut(s,[0,0.333,0.666,1])
exp = Series(Categorical.from_codes([0,0,0,1,1,1,2,2,2],
["[0, 2.664]", "(2.664, 5.328]", "(5.328, 8]"],
ordered=True))
tm.assert_series_equal(res, exp)
def test_series_retbins(self):
# GH 8589
s = Series(np.arange(4))
result, bins = cut(s, 2, retbins=True)
tm.assert_numpy_array_equal(result.cat.codes.values, [0, 0, 1, 1])
tm.assert_almost_equal(bins, [-0.003, 1.5, 3])
result, bins = qcut(s, 2, retbins=True)
tm.assert_numpy_array_equal(result.cat.codes.values, [0, 0, 1, 1])
tm.assert_almost_equal(bins, [0, 1.5, 3])
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
muthujothi/CrowdAnalytix-CrimeRates-PredictiveModelling | curate_data.py | 1 | 1354 | import pandas as pd
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
import csv
#Load the train data
df_1 = pd.read_csv('C:/Pst Files/CrowdAnalytix/CrimeRates/CA_Crime_Rate_Test_old-bk .csv')
#x = df_1.ix[:,8]
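# apply an in-place median fillna to each column to impute missing values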
f = lambda x: x.fillna(x.median(), inplace=True)
df_1.apply(f)
df_1.to_csv('C:/Pst Files/CrowdAnalytix/CrimeRates/CA_Crime_Rate_MungedData_Test.csv')
print "done"
'''
f = lambda x: x.fillna(x.mean())
df_1.apply(f)
df_1.to_csv('C:/Pst Files/CrowdAnalytix/CrimeRates/CA_Crime_Rate_MungedData .csv')
print "done"
'''
'''
feature_means = []
feature_medians = []
for i in range (2, 120):
df_feature = df_1.ix[:,i]
ft_mean = round(np.mean(df_feature), 5)
ft_median = round(np.median(df_feature), 5)
feature_means.append(ft_mean)
feature_medians.append(ft_median)
data_reader = csv.reader(open('C:/Pst Files/CrowdAnalytix/CrimeRates/CA_Crime_Rate_Train_old-bk .csv','rb'))
open_file_object = csv.writer(open("C:/Pst Files/CrowdAnalytix/CrimeRates/CA_Crime_Rate_MungedData.csv", "wb"))
header = data_reader.next() #skip the first line of the test file.
for data in data_reader:
for index in data:
idx = 0
val = str(data[idx])
if val != "":
data[idx] = feature_medians[idx-2]
idx += 1
open_file_object.writerow(data)
print "done"
'''
| mit |
brentp/methylcode | scripts/summary_plot.py | 1 | 4403 | """
plot per-chromosome average methylation levels (CG, CHG, CHH) as stacked bars.
usage:
%prog [options] dir1/ dir2/ ...
where dir1/ and dir2/ contain the .bin files from a run of methylcoder.
as many directories as you want.
"""
import matplotlib
matplotlib.rc('axes', edgecolor='#aaaaaa', linewidth=0.9)
matplotlib.rc('font', family='sans-serif')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import glob
import os.path as op
import numpy as np
def avg(c, t, m):
ac = np.fromfile(c, dtype=np.uint32)
at = np.fromfile(t, dtype=np.uint32)
am = np.fromfile(m, dtype=np.uint8)
mes = []
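    # methyltype codes mt and mt + 3 are pooled per context; the three contexts are reported as CG, CHG, CHH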
for mt in range(1, 4):
ctx = (am == mt) | (am == mt + 3)
ctx &= (at + ac) > 0
me = ac[ctx].sum() / float(ac[ctx].sum() + at[ctx].sum())
mes.append(me)
return mes
import collections
def get_data(ctms, labels):
names = collections.defaultdict(list)
for label, ctm in zip(labels, ctms):
avgs = []
for c, t, m in ctm:
mes = avg(c, t, m)
name = op.basename(c).replace('.c.bin', '')
pidx = name.find(".") + 1
name = name[pidx:]
avgs.append((name, mes))
avgs.sort()
for name, (cgs, chgs, chhs) in avgs:
names[name].append((cgs, chgs, chhs))
return dict(names)
def main(ctms, labels, opts):
names = get_data(ctms, labels)
plot(names, labels, opts)
def add_label(rect, label):
y = rect.get_height() + rect.get_y() + 0.01
x = rect.get_x() + rect.get_width() * 0.15
plt.text(x, y, label, horizontalalignment='left',
verticalalignment='bottom',
rotation='vertical'
)
def plot(names, labels, opts):
xlabels = sorted(names.keys())
n = len(names[xlabels[0]])
fig = plt.figure()
#ax = fig.add_axes([0, 0, 1, 1])
spacing = 0.04
width = ((1 - spacing * (n - 1)) / (n + 1))
xs = 2.0 * spacing + np.arange(len(xlabels))
for i in range(n):
cgs = np.array([names[schr][i][0] for schr in xlabels])
chgs = np.array([names[schr][i][1] for schr in xlabels])
chhs = np.array([names[schr][i][2] for schr in xlabels])
p1 = plt.bar(xs, cgs, width, color='r', linewidth=0)
p2 = plt.bar(xs, chgs, width, color='g', bottom=cgs, linewidth=0)
p3 = plt.bar(xs, chhs, width, color='b', bottom=cgs + chgs, linewidth=0)
xs += width + spacing
if n > 1:
add_label(p3[0], labels[i])
plt.xticks(xs - 0.66 * width - (width + spacing) * i/2., xlabels)
plt.legend((p1[0], p2[0], p3[0]), ('CG', 'CHG', 'CHH'))
plt.xlabel("Sequence Id")
plt.ylabel("Methylation c/(c + t)")
if opts.title: plt.title(opts.title)
print >>sys.stderr, "saving to %s" % opts.out
plt.savefig(opts.out)
if __name__ == "__main__":
import optparse
p = optparse.OptionParser(__doc__)
p.add_option("--out", dest="out", default="/var/www/t/met.png", help=\
"path to save image. extension (.png/.pdf) will determine file-type")
p.add_option("--title", dest="title", default="", help="image title")
p.add_option("--exclude", dest="exclude", default=None,
help="exclude these chromosomes. e.g. --exclude 'm|c'")
p.add_option("--labels", dest="labels", default=None, help="by default, "
"the directories specified in args are used to label the series "
"when plotting more than one directory. if --labels are specified "
"these will be used instead. must be pipe '|' delimited: e.g. : "
"--labels 'series a|series b|other series' the number specified"
" must match the number of directories sent on args")
opts, args = p.parse_args()
if len(args) == 0:
sys.exit(p.print_help())
ctms = []
exclude = [] if opts.exclude is None else opts.exclude.split("|")
for bin_dir in args:
cs = glob.glob("%s/*.c.bin" % bin_dir)
cs = [c for c in cs if not any(c.endswith("%s.c.bin" % e) for e in exclude)]
assert cs
ts = [c.replace('.c.bin', '.t.bin') for c in cs]
mt = [c.replace('.c.bin', '.methyltype.bin') for c in cs]
ctms.append(zip(cs, ts, mt))
labels = opts.labels.split("|") if opts.labels \
else [x.rstrip("/") for x in args]
main(ctms, labels, opts)
| bsd-3-clause |
anjalisood/spark-tk | regression-tests/sparktkregtests/testcases/frames/power_iteration_clustering_test.py | 11 | 5681 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test power iteration Clustering against known values"""
import math
import unittest
from sparktkregtests.lib import sparktk_test
class PowerIterationTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Import the files to be tested."""
super(PowerIterationTest, self).setUp()
data = self.get_file("pic_circles_data.csv")
self.schema = [('Source', int),
('Destination', int),
('Similarity', float)]
self.frame = self.context.frame.import_csv(data, schema=self.schema)
def test_doc_example(self):
""" Example from the API documentation """
data = [[1,2,1.0],
[1,3,0.3],
[2,3,0.3],
[3,0,0.03],
[0,5,0.01],
[5,4,0.3],
[5,6,1.0],
[4,6,0.3]]
frame = self.context.frame.create(data, schema=self.schema)
result = frame.power_iteration_clustering(
"Source", "Destination", "Similarity", k=3, max_iterations=20)
#check cluster sizes
actual_cluster_sizes = sorted(result.cluster_sizes.values())
expected_cluster_sizes = [1, 3, 3]
self.assertItemsEqual(actual_cluster_sizes, expected_cluster_sizes)
#check values assigned to each cluster
actual_assignment = result.frame.to_pandas(
result.frame.count()).groupby("cluster")
grouped_assignment = [list(val["id"]) for index, val in actual_assignment]
expected_assignment = [[4, 5, 6], [1, 2, 3], [0]]
self.assertEqual(sorted(map(sorted, grouped_assignment)), sorted(map(sorted, expected_assignment)))
def test_circles_default(self):
""" Test pic on similarity matrix for two concentric cicles """
result = self.frame.power_iteration_clustering(
"Source", "Destination", "Similarity", k=2)
#check cluster sizes
actual_cluster_sizes = sorted(result.cluster_sizes.values())
expected_cluster_sizes = [5, 15]
self.assertItemsEqual(actual_cluster_sizes, expected_cluster_sizes)
#check values assigned to each cluster
actual_assignment = result.frame.to_pandas(
result.frame.count()).values.tolist()
expected_assignment = \
[[4,1], [16,1], [14,1], [0,1], [6,1], [8,1],
[12,1], [18,1], [10,1], [2,1], [13,2], [19,2],
[15,2], [11,2], [1,1], [17,2], [3,1], [7,1], [9,1], [5,1]]
self.assertItemsEqual(actual_assignment, expected_assignment)
def test_circles_max_iterations(self):
""" Test pic with max_iteration = 40 """
result = self.frame.power_iteration_clustering(
"Source", "Destination", "Similarity", k=2, max_iterations=40)
#check cluster sizes
actual_cluster_sizes = sorted(result.cluster_sizes.values())
expected_cluster_sizes = [10, 10]
self.assertItemsEqual(actual_cluster_sizes, expected_cluster_sizes)
#check values assigned to each cluster
actual_assignment = result.frame.to_pandas(
result.frame.count()).values.tolist()
expected_assignment = \
[[4,1], [16,2], [14,2], [0,1], [6,1], [8,1],
[12,2], [18,2], [10,2], [2,1], [13,2], [19,2],
[15,2], [11,2], [1,1], [17,2], [3,1], [7,1], [9,1], [5,1]]
self.assertItemsEqual(actual_assignment, expected_assignment)
def test_neg_similarity(self):
""" Test pic with negative similarity values """
bad_frame = self.context.frame.create(
[[1, 0, -1.0], [1, 2, 0.1]], schema=self.schema)
with self.assertRaisesRegexp(
Exception, "Similarity must be nonnegative but found .*"):
result = bad_frame.power_iteration_clustering(
"Source", "Destination", "Similarity", k=2, max_iterations=10)
def test_bad_column_name(self):
""" Test behavior for bad source column name """
with self.assertRaisesRegexp(
Exception, "Invalid column name ERR .*"):
result = self.frame.power_iteration_clustering(
"ERR", "Destination", "Similarity")
def test_bad_k_value(self):
""" Tests behavior for bad number of k """
with self.assertRaisesRegexp(
Exception,
"Number of clusters must be must be greater than 1"):
result = self.frame.power_iteration_clustering(
"Source", "Destination", "Similarity", k=0)
def test_bad_max_iterations(self):
""" Tests behavior for negative max_iterations """
with self.assertRaisesRegexp(
Exception,
"Maximum number of iterations must be greater than 0"):
result = self.frame.power_iteration_clustering(
"Source", "Destination", "Similarity", k=2,
max_iterations=-1)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
jackchi/interview-prep | complexity/graph_complexity.py | 1 | 1516 | """
alganal.py
Description:
A utility program to plot algorithmic time complexity of a function.
Author: Mahesh Venkitachalam
Website: electronut.in
"""
from matplotlib import pyplot
import numpy as np
import timeit
from functools import partial
import random
def fconst(N):
"""
O(1) function
"""
x = 1
def flinear(N):
"""
O(n) function
"""
x = [i for i in range(N)]
def fsquare(N):
"""
O(n^2) function
"""
for i in range(N):
for j in range(N):
x = i*j
def fshuffle(N):
# O(N)
random.shuffle(list(range(N)))
def fsort(N):
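    # O(N log N)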
x = list(range(N))
random.shuffle(x)
x.sort()
def fexponential(N):
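    # result is an N-bit integer, so cost grows with N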
x = 2**N
def plotTC(fn, nMin, nMax, nInc, nTests):
"""
Run timer and plot time complexity
"""
x = []
y = []
for i in range(nMin, nMax, nInc):
N = i
testNTimer = timeit.Timer(partial(fn, N))
t = testNTimer.timeit(number=nTests)
x.append(i)
y.append(t)
p1 = pyplot.plot(x, y, 'o')
pyplot.legend([p1,], [fn.__name__, ])
# main() function
def main():
print('Analyzing Algorithms...')
# plotTC(flinear, 10, 1000, 10, 10)
plotTC(fsquare, 10, 100, 2, 10)
plotTC(fexponential, 10, 100, 2, 10)
#plotTC(fshuffle, 10, 1000, 1000, 10)
plotTC(fsort, 10, 100, 2, 10)
# enable this in case you want to set y axis limits
#pyplot.ylim((-0.1, 0.5))
# show plot
pyplot.show()
# call main
if __name__ == '__main__':
main() | mit |
andybrnr/QuantEcon.py | examples/perm_inc_ir.py | 7 | 1450 | """
Impulse response functions for the LQ permanent income model permanent and
transitory shocks.
"""
import numpy as np
import matplotlib.pyplot as plt
r = 0.05
beta = 1 / (1 + r)
T = 20 # Time horizon
S = 5 # Impulse date
sigma1 = sigma2 = 0.15
def time_path(permanent=False):
"Time path of consumption and debt given shock sequence"
w1 = np.zeros(T+1)
w2 = np.zeros(T+1)
b = np.zeros(T+1)
c = np.zeros(T+1)
if permanent:
w1[S+1] = 1.0
else:
w2[S+1] = 1.0
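    # a permanent shock moves consumption one-for-one; a transitory shock is mostly saved,
    # so debt falls and only its annuity value (1 - beta) enters consumption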
for t in range(1, T):
b[t+1] = b[t] - sigma2 * w2[t]
c[t+1] = c[t] + sigma1 * w1[t+1] + (1 - beta) * sigma2 * w2[t+1]
return b, c
fig, axes = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.5)
p_args = {'lw': 2, 'alpha': 0.7}
L = 0.175
for ax in axes:
ax.grid(alpha=0.5)
ax.set_xlabel(r'Time')
ax.set_ylim(-L, L)
ax.plot((S, S), (-L, L), 'k-', lw=0.5)
ax = axes[0]
b, c = time_path(permanent=0)
ax.set_title('impulse-response, transitory income shock')
ax.plot(list(range(T+1)), c, 'g-', label="consumption", **p_args)
ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args)
ax.legend(loc='upper right')
ax = axes[1]
b, c = time_path(permanent=1)
ax.set_title('impulse-response, permanent income shock')
ax.plot(list(range(T+1)), c, 'g-', label="consumption", **p_args)
ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args)
ax.legend(loc='lower right')
plt.show()
| bsd-3-clause |
rsivapr/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 15 | 2202 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
def test_binomial_deviance():
"""Check binomial deviance loss.
Check against alternative definitions in ESLII.
"""
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
"""Check log odds estimator. """
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/tests/test_typedef.py | 15 | 16852 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
import datetime
import decimal
from typing import List
import pandas
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
FloatType,
IntegerType,
LongType,
StringType,
StructField,
StructType,
ByteType,
ShortType,
DateType,
DecimalType,
DoubleType,
TimestampType,
)
from pyspark.pandas.typedef import (
as_spark_type,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
infer_return_type,
pandas_on_spark_type,
)
from pyspark import pandas as ps
class TypeHintTests(unittest.TestCase):
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_from_pandas_instances(self):
def func() -> pd.Series[int]:
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.int64)
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.Series[np.float]:
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.float64)
self.assertEqual(inferred.spark_type, DoubleType())
def func() -> "pd.DataFrame[np.float, str]":
pass
expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pandas.DataFrame[np.float]":
pass
expected = StructType([StructField("c0", DoubleType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pd.Series[int]":
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, np.int64)
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.DataFrame[np.float, str]:
pass
expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> pd.DataFrame[np.float]:
pass
expected = StructType([StructField("c0", DoubleType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})
def func() -> pd.Series[pdf.b.dtype]: # type: ignore
pass
inferred = infer_return_type(func)
self.assertEqual(inferred.dtype, CategoricalDtype(categories=["a", "b", "c"]))
self.assertEqual(inferred.spark_type, LongType())
def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
self.assertEqual(inferred.spark_type, expected)
def test_if_pandas_implements_class_getitem(self):
# the current type hint implementation of pandas DataFrame assumes pandas doesn't
# implement '__class_getitem__'. This test case is to make sure pandas
# doesn't implement them.
assert not ps._frame_has_class_getitem
assert not ps._series_has_class_getitem
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_with_names_pandas_instances(self):
def func() -> 'pd.DataFrame["a" : np.float, "b":str]': # noqa: F405
pass
expected = StructType([StructField("a", DoubleType()), StructField("b", StringType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
self.assertEqual(inferred.spark_type, expected)
def func() -> "pd.DataFrame['a': np.float, 'b': int]": # noqa: F405
pass
expected = StructType([StructField("a", DoubleType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.float64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): [3, 4, 5]})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType(
[StructField("(x, a)", LongType()), StructField("(y, b)", LongType())]
)
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, np.int64])
self.assertEqual(inferred.spark_type, expected)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})
def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
pass
expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
inferred = infer_return_type(func)
self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
self.assertEqual(inferred.spark_type, expected)
@unittest.skipIf(
sys.version_info < (3, 7),
"Type inference from pandas instances is supported with Python 3.7+",
)
def test_infer_schema_with_names_pandas_instances_negative(self):
def try_infer_return_type():
def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
class A:
pass
def try_infer_return_type():
def f() -> pd.DataFrame[A]:
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
# object type
pdf = pd.DataFrame({"a": ["a", 2, None]})
def try_infer_return_type():
def f() -> pd.DataFrame[pdf.dtypes]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> pd.Series[pdf.a.dtype]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def test_infer_schema_with_names_negative(self):
def try_infer_return_type():
def f() -> 'ps.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
class A:
pass
def try_infer_return_type():
def f() -> ps.DataFrame[A]:
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> 'ps.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F405
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)
# object type
pdf = pd.DataFrame({"a": ["a", 2, None]})
def try_infer_return_type():
def f() -> ps.DataFrame[pdf.dtypes]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def try_infer_return_type():
def f() -> ps.Series[pdf.a.dtype]: # type: ignore
pass
infer_return_type(f)
self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)
def test_as_spark_type_pandas_on_spark_dtype(self):
type_mapper = {
# binary
np.character: (np.character, BinaryType()),
np.bytes_: (np.bytes_, BinaryType()),
np.string_: (np.bytes_, BinaryType()),
bytes: (np.bytes_, BinaryType()),
# integer
np.int8: (np.int8, ByteType()),
np.byte: (np.int8, ByteType()),
np.int16: (np.int16, ShortType()),
np.int32: (np.int32, IntegerType()),
np.int64: (np.int64, LongType()),
np.int: (np.int64, LongType()),
int: (np.int64, LongType()),
# floating
np.float32: (np.float32, FloatType()),
np.float: (np.float64, DoubleType()),
np.float64: (np.float64, DoubleType()),
float: (np.float64, DoubleType()),
# string
np.str: (np.unicode_, StringType()),
np.unicode_: (np.unicode_, StringType()),
str: (np.unicode_, StringType()),
# bool
np.bool: (np.bool, BooleanType()),
bool: (np.bool, BooleanType()),
# datetime
np.datetime64: (np.datetime64, TimestampType()),
datetime.datetime: (np.dtype("datetime64[ns]"), TimestampType()),
# DateType
datetime.date: (np.dtype("object"), DateType()),
# DecimalType
decimal.Decimal: (np.dtype("object"), DecimalType(38, 18)),
# ArrayType
np.ndarray: (np.dtype("object"), ArrayType(StringType())),
List[bytes]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.character]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.bytes_]: (np.dtype("object"), ArrayType(BinaryType())),
List[np.string_]: (np.dtype("object"), ArrayType(BinaryType())),
List[bool]: (np.dtype("object"), ArrayType(BooleanType())),
List[np.bool]: (np.dtype("object"), ArrayType(BooleanType())),
List[datetime.date]: (np.dtype("object"), ArrayType(DateType())),
List[np.int8]: (np.dtype("object"), ArrayType(ByteType())),
List[np.byte]: (np.dtype("object"), ArrayType(ByteType())),
List[decimal.Decimal]: (np.dtype("object"), ArrayType(DecimalType(38, 18))),
List[float]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float64]: (np.dtype("object"), ArrayType(DoubleType())),
List[np.float32]: (np.dtype("object"), ArrayType(FloatType())),
List[np.int32]: (np.dtype("object"), ArrayType(IntegerType())),
List[int]: (np.dtype("object"), ArrayType(LongType())),
List[np.int]: (np.dtype("object"), ArrayType(LongType())),
List[np.int64]: (np.dtype("object"), ArrayType(LongType())),
List[np.int16]: (np.dtype("object"), ArrayType(ShortType())),
List[str]: (np.dtype("object"), ArrayType(StringType())),
List[np.unicode_]: (np.dtype("object"), ArrayType(StringType())),
List[datetime.datetime]: (np.dtype("object"), ArrayType(TimestampType())),
List[np.datetime64]: (np.dtype("object"), ArrayType(TimestampType())),
# CategoricalDtype
CategoricalDtype(categories=["a", "b", "c"]): (
CategoricalDtype(categories=["a", "b", "c"]),
LongType(),
),
}
for numpy_or_python_type, (dtype, spark_type) in type_mapper.items():
self.assertEqual(as_spark_type(numpy_or_python_type), spark_type)
self.assertEqual(pandas_on_spark_type(numpy_or_python_type), (dtype, spark_type))
with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
as_spark_type(np.dtype("uint64"))
with self.assertRaisesRegex(TypeError, "Type object was not understood."):
as_spark_type(np.dtype("object"))
with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
pandas_on_spark_type(np.dtype("uint64"))
with self.assertRaisesRegex(TypeError, "Type object was not understood."):
pandas_on_spark_type(np.dtype("object"))
@unittest.skipIf(not extension_dtypes_available, "The pandas extension types are not available")
def test_as_spark_type_extension_dtypes(self):
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
type_mapper = {
Int8Dtype(): ByteType(),
Int16Dtype(): ShortType(),
Int32Dtype(): IntegerType(),
Int64Dtype(): LongType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
@unittest.skipIf(
not extension_object_dtypes_available, "The pandas extension object types are not available"
)
def test_as_spark_type_extension_object_dtypes(self):
from pandas import BooleanDtype, StringDtype
type_mapper = {
BooleanDtype(): BooleanType(),
StringDtype(): StringType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
@unittest.skipIf(
not extension_float_dtypes_available, "The pandas extension float types are not available"
)
def test_as_spark_type_extension_float_dtypes(self):
from pandas import Float32Dtype, Float64Dtype
type_mapper = {
Float32Dtype(): FloatType(),
Float64Dtype(): DoubleType(),
}
for extension_dtype, spark_type in type_mapper.items():
self.assertEqual(as_spark_type(extension_dtype), spark_type)
self.assertEqual(pandas_on_spark_type(extension_dtype), (extension_dtype, spark_type))
if __name__ == "__main__":
from pyspark.pandas.tests.test_typedef import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
agentfog/qiime | scripts/plot_semivariogram.py | 9 | 15002 | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Antonio Gonzalez Pena"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Antonio Gonzalez Pena", "Kyle Patnode", "Yoshiki Vazquez-Baeza"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "[email protected]"
from qiime.plot_taxa_summary import make_legend
from qiime.colors import get_qiime_hex_string_color
from qiime.util import parse_command_line_parameters, get_options_lookup
from qiime.util import make_option
from qiime.plot_semivariogram import fit_semivariogram, FitModel
from qiime.parse import parse_distmat, parse_mapping_file
from qiime.filter import (filter_samples_from_distance_matrix,
sample_ids_from_metadata_description)
from matplotlib import use
use('Agg', warn=False)
from pylab import (plot, xlabel, ylabel, title, savefig, ylim, xlim, legend,
show, figure)
from numpy import asarray
import os
from os.path import splitext
from StringIO import StringIO
from copy import deepcopy
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = "Fits a model between two distance matrices " +\
"and plots the result"
script_info['script_description'] = "Fits a spatial autocorrelation model " +\
"between two matrices and plots the result. This script will work with " +\
"two distance matrices but will ignore the 0s at the diagonal and the " +\
"values that go to N/A. See distance_matrix_from_mapping.py."
script_info['script_usage'] = []
script_info['script_usage'].append(("Fitting", "For this script, the user "
"supplies two distance matrices (i.e. resulting file from "
"beta_diversity.py), along with the output filename (e.g. semivariogram), "
"and the model to fit, as follows:", "%prog -x distance.txt -y unifrac.txt "
"-o semivariogram_exponential.png"))
script_info['script_usage'].append(("", "Modify the the default method to "
"gaussian", "%prog -x distance.txt -y unifrac.txt --model gaussian -o "
"semivariogram_gaussian.png"))
script_info['script_usage'].append(("Color semivariograms by a category in"
" the metadata mapping file", "Using a header name in the mapping file"
" (Time), create two separate semivariograms in the same plot, an "
"accompanying file with the color coding will be created"
"(categories_legend.eps), both the legends and the plot will be in eps "
"format.", "%prog -y unweighted_unifrac_dm.txt -x time_dm.txt --model "
"gaussian -m Fasting_Map.txt -o categories.eps -c Treatment"))
script_info['output_description'] = "The resulting output file consists of a " +\
"pdf image containing the plot between the two distances matrices and the" +\
" fitted model"
script_info['required_options'] = [
make_option('-x', '--input_path_x', type='existing_filepath',
help='path to distance matrix to be displayed in the x axis'),
make_option('-y', '--input_path_y', type='existing_filepath',
help='path to distance matrix to be displayed in the y axis'),
make_option('-o', '--output_path', type='new_path',
help='output path. directory for batch processing, ' +
'filename for single file operation'),
]
script_info['optional_options'] = [
make_option('-b', '--binning', type='string',
default=None, help='binning ranges. Format: [increment,top_limit], when ' +
'top_limit is -1=infinitum; you can specify several ranges using the same ' +
'format, i.e. [2.5,10][50,-1] will set two bins, one from 0-10 using 2.5 ' +
'size steps and from 10-inf using 50 size steps. Note that the binning is ' +
'used to clean the plots (reduce number of points) but ignored to fit the ' +
'model. [default: %default]'),
    make_option('--ignore_missing_samples', help='This will bypass the error raised ' +
'when the matrices have different sizes/samples', action='store_true', default=False),
make_option(
'--x_max',
type='float',
help='x axis max limit [default: auto]',
default=None),
make_option(
'--x_min',
type='float',
help='x axis min limit [default: auto]',
default=None),
make_option(
'--y_max',
type='float',
help='y axis max limit [default: auto]',
default=None),
make_option(
'--y_min',
type='float',
help='y axis min limit [default: auto]',
default=None),
make_option(
'-X', '--x_label', default='Distance Dissimilarity (m)', type='string',
help='Label for the x axis [default: %default]'),
make_option(
'-Y', '--y_label', default='Community Dissimilarity', type='string',
help='Label for the y axis [default: %default]'),
make_option('-t', '--fig_title', default='Semivariogram', type='string',
help='Title of the plot [default: %default]'),
make_option('--dot_color', type='string', help='dot color for plot, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default="white"),
    make_option('--dot_marker', type='string', help='dot marker for plot, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default="o"),
make_option('--line_color', type='string', help='line color for plot, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default="blue"),
make_option('--dot_alpha', type='float', help='alpha for dots, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default=1),
    make_option('--line_alpha', type='float', help='alpha for the line, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default=1),
make_option('--model', type='choice',
choices=FitModel.options, default='exponential',
help='model to be fitted to the data. Valid ' +
'choices are:' + ', '.join(FitModel.options) + '. [default: %default]'),
make_option('-p', '--print_model', action='store_true',
help='Print in the title of the plot the function of the fit. ' +
'[default: %default]', default=False),
make_option('-c', '--category', type='string', help='category to color each of'
' the trajectories when you have multiple treatments [default: %default]',
default=None),
make_option('-m', '--mapping_fp', type='existing_filepath', help='metadata '
'mapping file, only used when coloring by a category, a file with the '
'legends and color coding will be created with the suffix legend '
'[default: %default]',
default=None)
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
category = opts.category
mapping_fp = opts.mapping_fp
colors_used = []
if (category and mapping_fp is None) or (category is None and mapping_fp):
option_parser.error('If coloring by a metadata category, both the '
'category and the mapping file must be supplied.')
elif mapping_fp and category:
mapping_data, mapping_headers, _ = parse_mapping_file(open(mapping_fp,
'U'))
if category not in mapping_headers:
option_parser.error("The category supplied must exist in the "
"metadata mapping file, '%s' does not exist." % category)
index = mapping_headers.index(category)
categories = list(set([line[index] for line in mapping_data]))
list_of_plots = []
if opts.binning is None:
ranges = []
else:
# simple ranges format validation
if opts.binning.count('[') != opts.binning.count(']') or\
opts.binning.count('[') != opts.binning.count(','):
raise ValueError("The binning input has an error: '%s'; " % +
"\nthe format should be [increment1,top_limit1][increment2,top_limit2]")
# spliting in ranges
rgn_txt = opts.binning.split('][')
# removing left [ and right ]
rgn_txt[0] = rgn_txt[0][1:]
rgn_txt[-1] = rgn_txt[-1][:-1]
# converting into int
ranges = []
max = 0
for i, r in enumerate(rgn_txt):
try:
values = map(float, r.split(','))
except ValueError:
raise ValueError(
"Not a valid format for binning %s" %
opts.binning)
if len(values) != 2:
raise ValueError(
"All ranges must have only 2 values: [%s]" %
r)
elif i + 1 != len(rgn_txt):
if values[0] > values[1]:
raise ValueError(
"The bin value can't be greater than the max value: [%s]" %
r)
                elif values[0] < 0 or values[1] < 0:
raise ValueError(
"This value can not be negative: [%s]" %
r)
elif max > values[1]:
raise ValueError(
"This value can not smaller than the previous one: [%s]" %
r)
else:
max = values[1]
ranges.append(values)
x_samples, x_distmtx = parse_distmat(open(opts.input_path_x, 'U'))
y_samples, y_distmtx = parse_distmat(open(opts.input_path_y, 'U'))
if opts.ignore_missing_samples:
ignoring_from_x = list(set(x_samples) - set(y_samples))
ignoring_from_y = list(set(y_samples) - set(x_samples))
if opts.verbose:
print '\nFrom %s we are ignoring: %s\n' % (opts.input_path_x, ignoring_from_x)
print '\nFrom %s we are ignoring: %s\n' % (opts.input_path_y, ignoring_from_y)
print '\nOnly using: %s\n' % (list(set(x_samples) & set(y_samples)))
x_file = StringIO(
filter_samples_from_distance_matrix((x_samples, x_distmtx), ignoring_from_x))
x_samples, x_distmtx = parse_distmat(x_file)
y_file = StringIO(
filter_samples_from_distance_matrix((y_samples, y_distmtx), ignoring_from_y))
y_samples, y_distmtx = parse_distmat(y_file)
else:
if x_distmtx.shape != y_distmtx.shape:
raise ValueError('The distance matrices have different sizes. ' +
'You can cancel this error by passing --ignore_missing_samples')
figure()
if category is None:
x_val, y_val, x_fit, y_fit, func_text = fit_semivariogram(
(x_samples, x_distmtx), (y_samples, y_distmtx), opts.model, ranges)
plot(
x_val,
y_val,
color=opts.dot_color,
marker=opts.dot_marker,
linestyle="None",
alpha=opts.dot_alpha)
plot(
x_fit,
y_fit,
linewidth=2.0,
color=opts.line_color,
alpha=opts.line_alpha)
else:
# not all the categories that are going to be enumerated are found in
# the distance matrices i.e. the mapping file is a superset that can
# contain more samples than the distance matrices
used_categories = deepcopy(categories)
for index, single_category in enumerate(categories):
good_sample_ids = sample_ids_from_metadata_description(
open(mapping_fp), '%s:%s' % (category, single_category))
try:
_y_samples, _y_distmtx = parse_distmat(StringIO(
filter_samples_from_distance_matrix((y_samples, y_distmtx),
good_sample_ids, negate=True)))
_x_samples, _x_distmtx = parse_distmat(StringIO(
filter_samples_from_distance_matrix((x_samples, x_distmtx),
good_sample_ids, negate=True)))
except ValueError:
# no samples found for this category
used_categories.remove(single_category)
continue
x_val, y_val, x_fit, y_fit, func_text = fit_semivariogram(
(_x_samples, _x_distmtx), (_y_samples, _y_distmtx),
opts.model, ranges)
# retrieve one of the colors the "QIIME" colors and add it to the
# list of used colors for the creation of the legends in the plot
color_only = get_qiime_hex_string_color(index)
colors_used.append(color_only)
plot(x_val, y_val, color=color_only, marker=opts.dot_marker,
linestyle="None", alpha=opts.dot_alpha)
plot(x_fit, y_fit, linewidth=2.0, color=color_only,
alpha=opts.line_alpha, label=single_category)
# set plot limits if requested
x_lb, x_ub = xlim()
y_lb, y_ub = ylim()
if opts.x_min is not None:
x_lb = opts.x_min
if opts.x_max is not None:
x_ub = opts.x_max
if opts.y_min is not None:
y_lb = opts.y_min
if opts.y_max is not None:
y_ub = opts.y_max
xlim(x_lb, x_ub)
ylim(y_lb, y_ub)
x_label = opts.x_label
y_label = opts.y_label
fig_title = '%s (%s)' % (opts.fig_title, opts.model)
xlabel(x_label)
ylabel(y_label)
if opts.print_model:
title(fig_title + ' ' + func_text)
else:
title(fig_title)
savefig(opts.output_path)
# print the legends after the figure is exported to avoid conflicts
if category:
# if there's a desired format, use that, else default it to png
_, extension = splitext(opts.output_path)
# remove the dot, else, make_legend will add it to the filename
extension = extension.replace('.', '')
if extension == '':
extension = 'png'
make_legend(used_categories, colors_used, 0, 0, 'black', 'white',
opts.output_path, extension, 80)
if __name__ == "__main__":
main()
| gpl-2.0 |
richter-t/espresso | samples/python/lbf.py | 4 | 2067 | #
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd
from espressomd import thermostat
from espressomd import code_info
from espressomd import lb
import numpy as np
print("""
=======================================================
= lbf.py =
=======================================================
Program Information:""")
print(code_info.features())
system = espressomd.System()
system.time_step = 0.01
system.cell_system.skin = 0.1
box_l = 50
system.box_l =[box_l, box_l, box_l]
# system.periodic = [1,1,1]
system.part.add(id=0, pos=[box_l/2.0,box_l/2.0,box_l/2.0], fix=[1,1,1])
# system.part.add(id=0, pos=[box_l/2.0,box_l/2.0,box_l/2.0], ext_force=[0,0,1])
#lbf = lb.LBFluid_GPU(agrid=1, fric=1, dens=1, visc=1, tau=0.01, ext_force=[0,0,-1.0/(box_l**3)])
lbf = lb.LBFluid(agrid=1, fric=1, dens=1, visc=1, tau=0.01, ext_force=[0,0,-1.0/(box_l**3)])
system.actors.add(lbf)
print(system.actors)
print(lbf.get_params())
f_list = []
for i in range(10):
f_list.append(system.part[0].f)
system.integrator.run(steps=10)
print(i)
f_list=np.array(f_list)
import matplotlib.pyplot as pp
fig1=pp.figure()
ax=fig1.add_subplot(111)
ax.plot(f_list[:,0],label="F_x")
ax.plot(f_list[:,1],label="F_y")
ax.plot(f_list[:,2],label="F_z")
ax.legend()
ax.set_xlabel("t")
ax.set_ylabel("F")
pp.show()
| gpl-3.0 |
pradyu1993/scikit-learn | examples/plot_digits_classification.py | 3 | 2225 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print __doc__
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 3 images, stored in the `images`
# attribute of the dataset. If we were working from image files, we
# could load them using pylab.imread. For these images, we know which
# digit they represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index + 1)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Training: %i' % label)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print "Classification report for classifier %s:\n%s\n" % (
classifier, metrics.classification_report(expected, predicted))
print "Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)
for index, (image, prediction) in enumerate(
zip(digits.images[n_samples / 2:], predicted)[:4]):
pl.subplot(2, 4, index + 5)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Prediction: %i' % prediction)
pl.show()
| bsd-3-clause |
sumspr/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
MDAnalysis/mdanalysis | package/MDAnalysis/analysis/psa.py | 1 | 87765 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
r"""
Calculating path similarity --- :mod:`MDAnalysis.analysis.psa`
==========================================================================
:Author: Sean Seyler
:Year: 2015
:Copyright: GNU Public License v3
.. versionadded:: 0.10.0
The module contains code to calculate the geometric similarity of trajectories
using path metrics such as the Hausdorff or Fréchet distances
[Seyler2015]_. The path metrics are functions of two paths and return a
nonnegative number, i.e., a distance. Two paths are identical if their distance
is zero, and large distances indicate dissimilarity. Each path metric is a
function of the individual points (e.g., coordinate snapshots) that comprise
each path and, loosely speaking, identify the two points, one per path of a
pair of paths, where the paths deviate the most. The distance between these
points of maximal deviation is measured by the root mean square deviation
(RMSD), i.e., to compute structural similarity.
One typically computes the pairwise similarity for an ensemble of paths to
produce a symmetric distance matrix, which can be clustered to, at a glance,
identify patterns in the trajectory data. To properly analyze a path ensemble,
one must select a suitable reference structure to which all paths (each
conformer in each path) will be universally aligned using the rotations
determined by the best-fit rmsds. Distances between paths and their structures
are then computed directly with no further alignment. This pre-processing step
is necessary to preserve the metric properties of the Hausdorff and Fréchet
metrics; using the best-fit rmsd on a pairwise basis does not generally
preserve the triangle inequality.
Note
----
The `PSAnalysisTutorial`_ outlines a typical application of PSA to
a set of trajectories, including doing proper alignment,
performing distance comparisons, and generating heat
map-dendrogram plots from hierarchical clustering.
.. Rubric:: References
.. [Seyler2015] Seyler SL, Kumar A, Thorpe MF, Beckstein O (2015)
Path Similarity Analysis: A Method for Quantifying
Macromolecular Pathways. PLoS Comput Biol 11(10): e1004568.
doi: `10.1371/journal.pcbi.1004568`_
.. _`10.1371/journal.pcbi.1004568`: http://dx.doi.org/10.1371/journal.pcbi.1004568
.. _`PSAnalysisTutorial`: https://github.com/Becksteinlab/PSAnalysisTutorial
Helper functions and variables
------------------------------
The following convenience functions are used by other functions in this module.
.. autofunction:: sqnorm
.. autofunction:: get_msd_matrix
.. autofunction:: get_coord_axes
Classes, methods, and functions
-------------------------------
.. autofunction:: get_path_metric_func
.. autofunction:: hausdorff
.. autofunction:: hausdorff_wavg
.. autofunction:: hausdorff_avg
.. autofunction:: hausdorff_neighbors
.. autofunction:: discrete_frechet
.. autofunction:: dist_mat_to_vec
.. autoclass:: Path
:members:
.. attribute:: u_original
:class:`MDAnalysis.Universe` object with a trajectory
.. attribute:: u_reference
:class:`MDAnalysis.Universe` object containing a reference structure
.. attribute:: select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select frame
from :attr:`Path.u_reference`
.. attribute:: path_select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select atoms
to compose :attr:`Path.path`
.. attribute:: ref_frame
int, frame index to select frame from :attr:`Path.u_reference`
.. attribute:: u_fitted
:class:`MDAnalysis.Universe` object with the fitted trajectory
.. attribute:: path
:class:`numpy.ndarray` object representation of the fitted trajectory
.. autoclass:: PSAPair
.. attribute:: npaths
int, total number of paths in the comparison in which *this*
:class:`PSAPair` was generated
.. attribute:: matrix_id
(int, int), (row, column) indices of the location of *this*
:class:`PSAPair` in the corresponding pairwise distance matrix
.. attribute:: pair_id
int, ID of *this* :class:`PSAPair` (the pair_id:math:`^\text{th}`
comparison) in the distance vector corresponding to the pairwise distance
matrix
.. attribute:: nearest_neighbors
dict, contains the nearest neighbors by frame index and the
nearest neighbor distances for each path in *this* :class:`PSAPair`
.. attribute:: hausdorff_pair
dict, contains the frame indices of the Hausdorff pair for each path in
*this* :class:`PSAPair` and the corresponding (Hausdorff) distance
.. autoclass:: PSAnalysis
:members:
.. attribute:: universes
list of :class:`MDAnalysis.Universe` objects containing trajectories
.. attribute:: u_reference
:class:`MDAnalysis.Universe` object containing a reference structure
.. attribute:: select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select frame
from :attr:`PSAnalysis.u_reference`
.. attribute:: path_select
string, selection for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` to select atoms
to compose :attr:`Path.path`
.. attribute:: ref_frame
int, frame index to select frame from :attr:`Path.u_reference`
.. attribute:: paths
list of :class:`numpy.ndarray` objects representing the set/ensemble of
fitted trajectories
.. attribute:: D
      :class:`numpy.ndarray` which stores the calculated distance matrix
.. Markup definitions
.. ------------------
..
.. |3Dp| replace:: :math:`N_p \times N \times 3`
.. |2Dp| replace:: :math:`N_p \times (3N)`
.. |3Dq| replace:: :math:`N_q \times N \times 3`
.. |2Dq| replace:: :math:`N_q \times (3N)`
.. |3D| replace:: :math:`N_p\times N\times 3`
.. |2D| replace:: :math:`N_p\times 3N`
.. |Np| replace:: :math:`N_p`
"""
import pickle
import os
import warnings
import numbers
import numpy as np
from scipy import spatial, cluster
from scipy.spatial.distance import directed_hausdorff
import matplotlib
import MDAnalysis
import MDAnalysis.analysis.align
from MDAnalysis import NoDataError
from MDAnalysis.lib.util import deprecate
import logging
logger = logging.getLogger('MDAnalysis.analysis.psa')
from ..due import due, Doi
due.cite(Doi("10.1371/journal.pcbi.1004568"),
description="Path Similarity Analysis algorithm and implementation",
path="MDAnalysis.analysis.psa",
cite_module=True)
del Doi
def get_path_metric_func(name):
"""Selects a path metric function by name.
Parameters
----------
name : str
name of path metric
Returns
-------
path_metric : function
The path metric function specified by *name* (if found).
"""
path_metrics = {
'hausdorff' : hausdorff,
'weighted_average_hausdorff' : hausdorff_wavg,
'average_hausdorff' : hausdorff_avg,
'hausdorff_neighbors' : hausdorff_neighbors,
'discrete_frechet' : discrete_frechet
}
try:
return path_metrics[name]
except KeyError as key:
errmsg = (f'Path metric "{key}" not found. Valid selections: '
f'{" ".join(n for n in path_metrics.keys())}')
raise KeyError(errmsg) from None
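# --- Editorial sketch (not part of the original module) ----------------------
# Minimal illustration of selecting a path metric by name and applying it to
# two toy paths of random coordinates; the helper name `_example_metric_by_name`
# is hypothetical and added here purely for illustration.
def _example_metric_by_name():
    metric = get_path_metric_func('discrete_frechet')
    P = np.random.random((4, 5, 3))   # 4 frames, 5 atoms, x/y/z coordinates
    Q = np.random.random((6, 5, 3))   # 6 frames, 5 atoms, x/y/z coordinates
    return metric(P, Q)               # a single nonnegative distance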
def sqnorm(v, axis=None):
"""Compute the sum of squares of elements along specified axes.
Parameters
----------
v : numpy.ndarray
coordinates
axes : None / int / tuple (optional)
Axes or axes along which a sum is performed. The default
(*axes* = ``None``) performs a sum over all the dimensions of
the input array. The value of *axes* may be negative, in
which case it counts from the last axis to the zeroth axis.
Returns
-------
float
the sum of the squares of the elements of `v` along `axes`
"""
return np.sum(v*v, axis=axis)
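# --- Editorial sketch (not part of the original module) ----------------------
# Tiny illustration of how the `axis` argument controls the reduction: summing
# over the last two axes of a (frames, atoms, 3) array yields one squared norm
# per frame, while the default sums over everything.
def _example_sqnorm_axes():
    v = np.ones((2, 3, 3))               # 2 frames, 3 atoms, x/y/z coordinates
    per_frame = sqnorm(v, axis=(1, 2))   # -> array([9., 9.])
    total = sqnorm(v)                    # -> 18.0
    return per_frame, total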
def get_msd_matrix(P, Q, axis=None):
r"""Generate the matrix of pairwise mean-squared deviations between paths.
The MSDs between all pairs of points in `P` and `Q` are
calculated, each pair having a point from `P` and a point from
`Q`.
`P` (`Q`) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). The pairwise MSD
matrix has dimensions :math:`N_p` by :math:`N_q`.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
msd_matrix : numpy.ndarray
matrix of pairwise MSDs between points in `P` and points
in `Q`
Notes
-----
We calculate the MSD matrix
.. math::
M_{ij} = ||p_i - q_j||^2
where :math:`p_i \in P` and :math:`q_j \in Q`.
"""
return np.asarray([sqnorm(p - Q, axis=axis) for p in P])
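# --- Editorial sketch (not part of the original module) ----------------------
# The pairwise MSD matrix of two toy paths has shape (len(P), len(Q)); entry
# (i, j) is the squared deviation between frame i of P and frame j of Q summed
# over all atoms and coordinates, as in the formula above.
def _example_msd_matrix():
    P = np.zeros((4, 5, 3))              # 4 frames, 5 atoms, x/y/z coordinates
    Q = np.ones((6, 5, 3))               # 6 frames, 5 atoms, x/y/z coordinates
    d = get_msd_matrix(P, Q, axis=(1, 2))
    return d.shape, d[0, 0]              # -> (4, 6), 15.0 (= 5 atoms * 3 coords * 1.0)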
def reshaper(path, axis):
"""Flatten path when appropriate to facilitate calculations
requiring two dimensional input.
"""
if len(axis) > 1:
path = path.reshape(len(path), -1)
return path
def get_coord_axes(path):
"""Return the number of atoms and the axes corresponding to atoms
and coordinates for a given path.
The `path` is assumed to be a :class:`numpy.ndarray` where the 0th axis
corresponds to a frame (a snapshot of coordinates). The :math:`3N`
(Cartesian) coordinates are assumed to be either:
1. all in the 1st axis, starting with the x,y,z coordinates of the
first atom, followed by the *x*,*y*,*z* coordinates of the 2nd, etc.
2. in the 1st *and* 2nd axis, where the 1st axis indexes the atom
number and the 2nd axis contains the *x*,*y*,*z* coordinates of
each atom.
Parameters
----------
path : numpy.ndarray
representing a path
Returns
-------
(int, (int, ...))
the number of atoms and the axes containing coordinates
"""
path_dimensions = len(path.shape)
if path_dimensions == 3:
N = path.shape[1]
axis = (1,2) # 1st axis: atoms, 2nd axis: x,y,z coords
elif path_dimensions == 2:
# can use mod to check if total # coords divisible by 3
        N = path.shape[1] // 3
        axis = (1,)  # 1st axis: 3N structural coords (x1,y1,z1,...,xN,yN,zN)
else:
raise ValueError("Path must have 2 or 3 dimensions; the first "
"dimensions (axis 0) must correspond to frames, "
"axis 1 (and axis 2, if present) must contain atomic "
"coordinates.")
return N, axis
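# --- Editorial sketch (not part of the original module) ----------------------
# The same toy path in its 3D form (frames, atoms, 3) and in its flattened 2D
# form (frames, 3N) reports the same number of atoms but different coordinate
# axes; `_example_coord_axes` is a hypothetical helper for illustration only.
def _example_coord_axes():
    path3d = np.zeros((10, 4, 3))          # 10 frames, 4 atoms, x/y/z coordinates
    path2d = path3d.reshape(10, -1)        # 10 frames, 12 flattened coordinates
    n3, axes3 = get_coord_axes(path3d)     # -> 4, (1, 2)
    n2, axes2 = get_coord_axes(path2d)     # -> 4, (1,)
    return (n3, axes3), (n2, axes2)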
def hausdorff(P, Q):
r"""Calculate the symmetric Hausdorff distance between two paths.
The metric used is RMSD, as opposed to the more conventional L2
(Euclidean) norm, because this is convenient for i.e., comparing
protein configurations.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form.
Note that reversing the path does not change the Hausdorff distance.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the Hausdorff distance between paths `P` and `Q`
Example
-------
Calculate the Hausdorff distance between two halves of a trajectory:
>>> from MDAnalysis.tests.datafiles import PSF, DCD
    >>> from MDAnalysis import Universe
    >>> u = Universe(PSF,DCD)
    >>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = numpy.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = numpy.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> hausdorff(P,Q)
4.7786639840135905
>>> hausdorff(P,Q[::-1]) # hausdorff distance w/ reversed 2nd trajectory
4.7786639840135905
Notes
-----
:func:`scipy.spatial.distance.directed_hausdorff` is an optimized
implementation of the early break algorithm of [Taha2015]_; the
latter code is used here to calculate the symmetric Hausdorff
distance with an RMSD metric
References
----------
.. [Taha2015] A. A. Taha and A. Hanbury. An efficient algorithm for
calculating the exact Hausdorff distance. IEEE Transactions On Pattern
Analysis And Machine Intelligence, 37:2153-63, 2015.
"""
N_p, axis_p = get_coord_axes(P)
N_q, axis_q = get_coord_axes(Q)
if N_p != N_q:
raise ValueError("P and Q must have matching sizes")
P = reshaper(P, axis_p)
Q = reshaper(Q, axis_q)
return max(directed_hausdorff(P, Q)[0],
directed_hausdorff(Q, P)[0]) / np.sqrt(N_p)
def hausdorff_wavg(P, Q):
r"""Calculate the weighted average Hausdorff distance between two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form. The nearest
neighbor distances for *P* (to *Q*) and those of *Q* (to *P*) are averaged
individually to get the average nearest neighbor distance for *P* and
likewise for *Q*. These averages are then summed and divided by 2 to get a
measure that gives equal weight to *P* and *Q*.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the weighted average Hausdorff distance between paths `P` and `Q`
Example
-------
>>> from MDAnalysis import Universe
>>> from MDAnalysis.tests.datafiles import PSF, DCD
>>> u = Universe(PSF,DCD)
    >>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = numpy.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = numpy.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> hausdorff_wavg(P,Q)
2.5669644353703447
>>> hausdorff_wavg(P,Q[::-1]) # weighted avg hausdorff dist w/ Q reversed
2.5669644353703447
Notes
-----
The weighted average Hausdorff distance is not a true metric (it does not
obey the triangle inequality); see [Seyler2015]_ for further details.
"""
N, axis = get_coord_axes(P)
d = get_msd_matrix(P, Q, axis=axis)
out = 0.5*( np.mean(np.amin(d,axis=0)) + np.mean(np.amin(d,axis=1)) )
return ( out / N )**0.5
def hausdorff_avg(P, Q):
r"""Calculate the average Hausdorff distance between two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form. The nearest
neighbor distances for *P* (to *Q*) and those of *Q* (to *P*) are all
averaged together to get a mean nearest neighbor distance. This measure
biases the average toward the path that has more snapshots, whereas weighted
average Hausdorff gives equal weight to both paths.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the average Hausdorff distance between paths `P` and `Q`
Example
-------
>>> from MDAnalysis.tests.datafiles import PSF, DCD
    >>> from MDAnalysis import Universe
    >>> u = Universe(PSF,DCD)
    >>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = numpy.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = numpy.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> hausdorff_avg(P,Q)
2.5669646575869005
>>> hausdorff_avg(P,Q[::-1]) # hausdorff distance w/ reversed 2nd trajectory
2.5669646575869005
Notes
-----
The average Hausdorff distance is not a true metric (it does not obey the
triangle inequality); see [Seyler2015]_ for further details.
"""
N, axis = get_coord_axes(P)
d = get_msd_matrix(P, Q, axis=axis)
out = np.mean( np.append( np.amin(d,axis=0), np.amin(d,axis=1) ) )
return ( out / N )**0.5
def hausdorff_neighbors(P, Q):
r"""Find the Hausdorff neighbors of two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or |2Dp| (|2Dq|) in flattened form.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
dict
dictionary of two pairs of numpy arrays, the first pair (key
"frames") containing the indices of (Hausdorff) nearest
neighbors for `P` and `Q`, respectively, the second (key
"distances") containing (corresponding) nearest neighbor
distances for `P` and `Q`, respectively
Notes
-----
- Hausdorff neighbors are those points on the two paths that are separated by
the Hausdorff distance. They are the farthest nearest neighbors and are
maximally different in the sense of the Hausdorff distance [Seyler2015]_.
- :func:`scipy.spatial.distance.directed_hausdorff` can also provide the
hausdorff neighbors.
"""
N, axis = get_coord_axes(P)
d = get_msd_matrix(P, Q, axis=axis)
nearest_neighbors = {
'frames' : (np.argmin(d, axis=1), np.argmin(d, axis=0)),
'distances' : ((np.amin(d,axis=1)/N)**0.5, (np.amin(d, axis=0)/N)**0.5)
}
return nearest_neighbors
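# --- Editorial sketch (not part of the original module) ----------------------
# For two toy paths, the returned dictionary gives, per path, the frame index
# of each frame's nearest neighbor in the other path and the corresponding
# RMSD; the largest of all these nearest-neighbor distances is the Hausdorff
# distance between the two paths. `_example_hausdorff_neighbors` is hypothetical.
def _example_hausdorff_neighbors():
    P = np.random.random((5, 8, 3))      # 5 frames, 8 atoms, x/y/z coordinates
    Q = np.random.random((7, 8, 3))      # 7 frames, 8 atoms, x/y/z coordinates
    nn = hausdorff_neighbors(P, Q)
    frames_P, frames_Q = nn['frames']    # shapes (5,) and (7,)
    dists_P, dists_Q = nn['distances']   # shapes (5,) and (7,)
    return frames_P, frames_Q, max(dists_P.max(), dists_Q.max())  # last value equals hausdorff(P, Q)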
def discrete_frechet(P, Q):
r"""Calculate the discrete Fréchet distance between two paths.
*P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
:attr:`MDAnalysis.core.groups.AtomGroup.positions`). *P* (*Q*) has
either shape |3Dp| (|3Dq|), or :|2Dp| (|2Dq|) in flattened form.
Parameters
----------
P : numpy.ndarray
the points in the first path
Q : numpy.ndarray
the points in the second path
Returns
-------
float
the discrete Fréchet distance between paths *P* and *Q*
Example
-------
Calculate the discrete Fréchet distance between two halves of a
trajectory.
    >>> from MDAnalysis import Universe
    >>> from MDAnalysis.tests.datafiles import PSF, DCD
    >>> u = Universe(PSF,DCD)
    >>> mid = len(u.trajectory)//2
>>> ca = u.select_atoms('name CA')
>>> P = np.array([
... ca.positions for _ in u.trajectory[:mid:]
... ]) # first half of trajectory
>>> Q = np.array([
... ca.positions for _ in u.trajectory[mid::]
... ]) # second half of trajectory
>>> discrete_frechet(P,Q)
4.7786639840135905
>>> discrete_frechet(P,Q[::-1]) # frechet distance w/ 2nd trj reversed 2nd
6.8429011177113832
Note that reversing the direction increased the Fréchet distance:
it is sensitive to the direction of the path.
Notes
-----
The discrete Fréchet metric is an approximation to the continuous Fréchet
metric [Frechet1906]_ [Alt1995]_. The calculation of the continuous
Fréchet distance is implemented with the dynamic programming algorithm of
[EiterMannila1994]_ [EiterMannila1997]_.
References
----------
.. [Frechet1906] M. Fréchet. Sur quelques points du calcul
fonctionnel. Rend. Circ. Mat. Palermo, 22(1):1–72, Dec. 1906.
.. [Alt1995] H. Alt and M. Godau. Computing the Fréchet distance between
two polygonal curves. Int J Comput Geometry & Applications,
5(01n02):75–91, 1995. doi: `10.1142/S0218195995000064`_
.. _`10.1142/S0218195995000064`: http://doi.org/10.1142/S0218195995000064
.. [EiterMannila1994] T. Eiter and H. Mannila. Computing discrete Fréchet
distance. Technical Report CD-TR 94/64, Christian Doppler Laboratory for
Expert Systems, Technische Universität Wien, Wien, 1994.
.. [EiterMannila1997] T. Eiter and H. Mannila. Distance measures for point
sets and their computation. Acta Informatica, 34:109–133, 1997. doi: `10.1007/s002360050075`_.
.. _10.1007/s002360050075: http://doi.org/10.1007/s002360050075
"""
N, axis = get_coord_axes(P)
Np, Nq = len(P), len(Q)
d = get_msd_matrix(P, Q, axis=axis)
ca = -np.ones((Np, Nq))
def c(i, j):
"""Compute the coupling distance for two partial paths formed by *P* and
*Q*, where both begin at frame 0 and end (inclusive) at the respective
frame indices :math:`i-1` and :math:`j-1`. The partial path of *P* (*Q*)
up to frame *i* (*j*) is formed by the slicing ``P[0:i]`` (``Q[0:j]``).
:func:`c` is called recursively to compute the coupling distance
between the two full paths *P* and *Q* (i.e., the discrete Frechet
distance) in terms of coupling distances between their partial paths.
Parameters
----------
i : int
partial path of *P* through final frame *i-1*
j : int
partial path of *Q* through final frame *j-1*
Returns
-------
dist : float
the coupling distance between partial paths `P[0:i]` and `Q[0:j]`
"""
if ca[i,j] != -1 :
return ca[i,j]
if i > 0:
if j > 0:
ca[i,j] = max( min(c(i-1,j),c(i,j-1),c(i-1,j-1)), d[i,j] )
else:
ca[i,j] = max( c(i-1,0), d[i,0] )
elif j > 0:
ca[i,j] = max( c(0,j-1), d[0,j] )
else:
ca[i,j] = d[0,0]
return ca[i,j]
return (c(Np-1, Nq-1) / N)**0.5
def dist_mat_to_vec(N, i, j):
"""Convert distance matrix indices (in the upper triangle) to the index of
the corresponding distance vector.
This is a convenience function to locate distance matrix elements (and the
pair generating it) in the corresponding distance vector. The row index *j*
should be greater than *i+1*, corresponding to the upper triangle of the
distance matrix.
Parameters
----------
N : int
size of the distance matrix (of shape *N*-by-*N*)
i : int
row index (starting at 0) of the distance matrix
j : int
column index (starting at 0) of the distance matrix
Returns
-------
int
index (of the matrix element) in the corresponding distance vector
"""
if not (isinstance(N, numbers.Integral) and isinstance(i, numbers.Integral)
and isinstance(j, numbers.Integral)):
raise ValueError("N, i, j all must be of type int")
if i < 0 or j < 0 or N < 2:
raise ValueError("Matrix indices are invalid; i and j must be greater "
"than 0 and N must be greater the 2")
if (j > i and (i > N - 1 or j > N)) or (j < i and (i > N or j > N - 1)):
raise ValueError("Matrix indices are out of range; i and j must be "
"less than N = {0:d}".format(N))
if j > i:
return (N*i) + j - (i+2)*(i+1) // 2 # old-style division for int output
elif j < i:
warnings.warn("Column index entered (j = {:d} is smaller than row "
"index (i = {:d}). Using symmetric element in upper "
"triangle of distance matrix instead: i --> j, "
"j --> i".format(j, i))
return (N*j) + i - (j+2)*(j+1) // 2 # old-style division for int output
else:
raise ValueError("Error in processing matrix indices; i and j must "
"be integers less than integer N = {0:d} such that"
" j >= i+1.".format(N))
class Path(object):
"""Represent a path based on a :class:`~MDAnalysis.core.universe.Universe`.
Pre-process a :class:`Universe` object: (1) fit the trajectory to a
reference structure, (2) convert fitted time series to a
:class:`numpy.ndarray` representation of :attr:`Path.path`.
The analysis is performed with :meth:`PSAnalysis.run` and stores the result
in the :class:`numpy.ndarray` distance matrix :attr:`PSAnalysis.D`.
:meth:`PSAnalysis.run` also generates a fitted trajectory and path from
alignment of the original trajectories to a reference structure.
.. versionadded:: 0.9.1
"""
def __init__(self, universe, reference, select='name CA',
path_select='all', ref_frame=0):
"""Setting up trajectory alignment and fitted path generation.
Parameters
----------
universe : Universe
:class:`MDAnalysis.Universe` object containing a trajectory
reference : Universe
reference structure (uses `ref_frame` from the trajectory)
select : str or dict or tuple (optional)
The selection to operate on for rms fitting; can be one of:
1. any valid selection string for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` that
produces identical selections in *mobile* and *reference*; or
2. a dictionary ``{'mobile':sel1, 'reference':sel2}`` (the
:func:`MDAnalysis.analysis.align.fasta2select` function returns
such a dictionary based on a ClustalW_ or STAMP_ sequence
alignment); or
3. a tuple ``(sel1, sel2)``
When using 2. or 3. with *sel1* and *sel2* then these selections
can also each be a list of selection strings (to generate an
AtomGroup with defined atom order as described under
:ref:`ordered-selections-label`).
ref_frame : int
frame index to select the coordinate frame from
`select.trajectory`
path_select : selection_string
atom selection composing coordinates of (fitted) path; if ``None``
then `path_select` is set to `select` [``None``]
"""
self.u_original = universe
self.u_reference = reference
self.select = select
self.ref_frame = ref_frame
self.path_select = path_select
self.top_name = self.u_original.filename
self.trj_name = self.u_original.trajectory.filename
self.newtrj_name = None
self.u_fitted = None
self.path = None
self.natoms = None
def fit_to_reference(self, filename=None, prefix='', postfix='_fit',
rmsdfile=None, targetdir=os.path.curdir,
weights=None, tol_mass=0.1):
"""Align each trajectory frame to the reference structure
Parameters
----------
filename : str (optional)
file name for the RMS-fitted trajectory or pdb; defaults to the
original trajectory filename (from :attr:`Path.u_original`) with
`prefix` prepended
prefix : str (optional)
prefix for auto-generating the new output filename
rmsdfile : str (optional)
file name for writing the RMSD time series [``None``]
weights : {"mass", ``None``} or array_like (optional)
choose weights. With ``"mass"`` uses masses as weights; with
``None`` weigh each atom equally. If a float array of the same
length as the selected AtomGroup is provided, use each element of
the `array_like` as a weight for the corresponding atom in the
AtomGroup.
tol_mass : float (optional)
Reject match if the atomic masses for matched atoms differ by more
than `tol_mass` [0.1]
Returns
-------
Universe
:class:`MDAnalysis.Universe` object containing a fitted trajectory
Notes
-----
Uses :class:`MDAnalysis.analysis.align.AlignTraj` for the fitting.
.. deprecated:: 0.16.1
Instead of ``mass_weighted=True`` use new ``weights='mass'``;
refactored to fit with AnalysisBase API
.. versionchanged:: 0.17.0
Deprecated keyword `mass_weighted` was removed.
"""
head, tail = os.path.split(self.trj_name)
oldname, ext = os.path.splitext(tail)
filename = filename or oldname
self.newtrj_name = os.path.join(targetdir, filename + postfix + ext)
self.u_reference.trajectory[self.ref_frame] # select frame from ref traj
aligntrj = MDAnalysis.analysis.align.AlignTraj(self.u_original,
self.u_reference,
select=self.select,
filename=self.newtrj_name,
prefix=prefix,
weights=weights,
tol_mass=tol_mass).run()
if rmsdfile is not None:
aligntrj.save(rmsdfile)
return MDAnalysis.Universe(self.top_name, self.newtrj_name)
def to_path(self, fitted=False, select=None, flat=False):
r"""Generates a coordinate time series from the fitted universe
trajectory.
Given a selection of *N* atoms from *select*, the atomic positions for
each frame in the fitted universe (:attr:`Path.u_fitted`) trajectory
(with |Np| total frames) are appended sequentially to form a 3D or 2D
(if *flat* is ``True``) :class:`numpy.ndarray` representation of the
fitted trajectory (with dimensions |3D| or |2D|, respectively).
Parameters
----------
fitted : bool (optional)
construct a :attr:`Path.path` from the :attr:`Path.u_fitted`
trajectory; if ``False`` then :attr:`Path.path` is generated with
the trajectory from :attr:`Path.u_original` [``False``]
select : str (optional)
the selection for constructing the coordinates of each frame in
:attr:`Path.path`; if ``None`` then :attr:`Path.path_select`
is used, else it is overridden by *select* [``None``]
flat : bool (optional)
represent :attr:`Path.path` as a 2D (|2D|) :class:`numpy.ndarray`;
if ``False`` then :attr:`Path.path` is a 3D (|3D|)
:class:`numpy.ndarray` [``False``]
Returns
-------
numpy.ndarray
representing a time series of atomic positions of an
:class:`MDAnalysis.core.groups.AtomGroup` selection from
:attr:`Path.u_fitted.trajectory`
"""
select = select if select is not None else self.path_select
if fitted:
if not isinstance(self.u_fitted, MDAnalysis.Universe):
raise TypeError("Fitted universe not found. Generate a fitted " +
"universe with fit_to_reference() first, or explicitly "+
"set argument \"fitted\" to \"False\" to generate a " +
"path from the original universe.")
u = self.u_fitted
else:
u = self.u_original
frames = u.trajectory
atoms = u.select_atoms(select)
self.natoms = len(atoms)
frames.rewind()
if flat:
return np.array([atoms.positions.flatten() for _ in frames])
else:
return np.array([atoms.positions for _ in frames])
def run(self, align=False, filename=None, postfix='_fit', rmsdfile=None,
targetdir=os.path.curdir, weights=None, tol_mass=0.1,
flat=False):
r"""Generate a path from a trajectory and reference structure.
As part of the path generation, the trajectory can be superimposed
("aligned") to a reference structure if specified.
This is a convenience method to generate a fitted trajectory from an
inputted universe (:attr:`Path.u_original`) and reference structure
(:attr:`Path.u_reference`). :meth:`Path.fit_to_reference` and
:meth:`Path.to_path` are used consecutively to generate a new universe
(:attr:`Path.u_fitted`) containing the fitted trajectory along with the
corresponding :attr:`Path.path` represented as an
:class:`numpy.ndarray`. The method returns a tuple of the topology name
and new trajectory name, which can be fed directly into an
:class:`MDAnalysis.Universe` object after unpacking the tuple using the
``*`` operator, as in
``MDAnalysis.Universe(*(top_name, newtraj_name))``.
Parameters
----------
align : bool (optional)
Align trajectory to atom selection :attr:`Path.select` of
:attr:`Path.u_reference`. If ``True``, a universe containing an
aligned trajectory is produced with :meth:`Path.fit_to_reference`
[``False``]
filename : str (optional)
filename for the RMS-fitted trajectory or pdb; defaults to the
original trajectory filename (from :attr:`Path.u_original`) with
*prefix* prepended
postfix : str (optional)
prefix for auto-generating the new output filename
rmsdfile : str (optional)
file name for writing the RMSD time series [``None``]
weights : {"mass", ``None``} or array_like (optional)
choose weights. With ``"mass"`` uses masses as weights; with
``None`` weigh each atom equally. If a float array of the same
length as the selected AtomGroup is provided, use each element of
the `array_like` as a weight for the corresponding atom in the
AtomGroup.
tol_mass : float (optional)
Reject match if the atomic masses for matched atoms differ by more
than *tol_mass* [0.1]
flat : bool (optional)
represent :attr:`Path.path` with 2D (|2D|) :class:`numpy.ndarray`;
if ``False`` then :attr:`Path.path` is a 3D (|3D|)
:class:`numpy.ndarray` [``False``]
Returns
-------
topology_trajectory : tuple
A tuple of the topology name and new trajectory name.
.. deprecated:: 0.16.1
Instead of ``mass_weighted=True`` use new ``weights='mass'``;
refactored to fit with AnalysisBase API
.. versionchanged:: 0.17.0
Deprecated keyword `mass_weighted` was removed.
"""
if align:
self.u_fitted = self.fit_to_reference(
filename=filename, postfix=postfix,
rmsdfile=rmsdfile, targetdir=targetdir,
weights=weights, tol_mass=0.1)
self.path = self.to_path(fitted=align, flat=flat)
return self.top_name, self.newtrj_name
def get_num_atoms(self):
"""Return the number of atoms used to construct the :class:`Path`.
Must run :meth:`Path.to_path` prior to calling this method.
Returns
-------
int
the number of atoms in the :class:`Path`
"""
if self.natoms is None:
raise ValueError("No path data; do 'Path.to_path()' first.")
return self.natoms
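# --- Editorial sketch (not part of the original module) ----------------------
# A minimal sketch of generating a fitted path from the MDAnalysis test
# trajectory (PSF/DCD), mirroring the docstring examples above. Running it
# writes an RMS-fitted trajectory to the current directory; the helper name
# `_example_path_generation` is hypothetical and purely illustrative.
def _example_path_generation():
    from MDAnalysis.tests.datafiles import PSF, DCD
    u = MDAnalysis.Universe(PSF, DCD)
    ref = MDAnalysis.Universe(PSF, DCD)       # use the same system as reference
    p = Path(u, ref, select='name CA', path_select='name CA', ref_frame=0)
    p.run(align=True)                         # superimpose, then build the path array
    return p.path.shape                       # -> (n_frames, n_CA_atoms, 3)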
class PSAPair(object):
"""Generate nearest neighbor and Hausdorff pair information between a pair
of paths from an all-pairs comparison generated by :class:`PSA`.
The nearest neighbors for each path of a pair of paths is generated by
:meth:`PSAPair.compute_nearest_neighbors` and stores the result
in a dictionary (:attr:`nearest_neighbors`): each path has a
:class:`numpy.ndarray` of the frames of its nearest neighbors, and a
:class:`numpy.ndarray` of its nearest neighbor distances
:attr:`PSAnalysis.D`. For example, *nearest_neighbors['frames']* is a pair
of :class:`numpy.ndarray`, the first being the frames of the nearest
neighbors of the first path, *i*, the second being those of the second path,
*j*.
The Hausdorff pair for the pair of paths is found by calling
:meth:`find_hausdorff_pair` (locates the nearest neighbor pair having the
largest overall distance separating them), which stores the result in a
dictionary (:attr:`hausdorff_pair`) containing the frames (indices) of the
pair along with the corresponding (Hausdorff) distance.
    *hausdorff_pair['frames']* contains a pair of frames in the first path, *i*,
and the second path, *j*, respectively, that correspond to the Hausdorff
distance between them.
.. versionadded:: 0.11
"""
def __init__(self, npaths, i, j):
"""Set up a :class:`PSAPair` for a pair of paths that are part of a
:class:`PSA` comparison of *npaths* total paths.
Each unique pair of paths compared using :class:`PSA` is related by
their nearest neighbors (and corresponding distances) and the Hausdorff
pair and distance. :class:`PSAPair` is a convenience class for
calculating and encapsulating nearest neighbor and Hausdorff pair
information for one pair of paths.
Given *npaths*, :class:`PSA` performs and all-pairs comparison among all
paths for a total of :math:`\text{npaths}*(\text{npaths}-1)/2` unique
comparisons. If distances between paths are computed, the all-pairs
comparison can be summarized in a symmetric distance matrix whose upper
triangle can be mapped to a corresponding distance vector form in a
one-to-one manner. A particular comparison of a pair of paths in a
given instance of :class:`PSAPair` is thus unique identified by the row
and column indices in the distance matrix representation (whether or not
distances are actually computed), or a single ID (index) in the
corresponding distance vector.
Parameters
----------
npaths : int
total number of paths in :class:`PSA` used to generate *this*
:class:`PSAPair`
i : int
row index (starting at 0) of the distance matrix
j : int
column index (starting at 0) of the distance matrix
"""
self.npaths = npaths
self.matrix_idx = (i,j)
self.pair_idx = self._dvec_idx(i,j)
# Set by calling hausdorff_nn
self.nearest_neighbors = {'frames' : None, 'distances' : None}
# Set by self.getHausdorffPair
self.hausdorff_pair = {'frames' : (None, None), 'distance' : None}
def _dvec_idx(self, i, j):
"""Convert distance matrix indices (in the upper triangle) to the index
of the corresponding distance vector.
This is a convenience function to locate distance matrix elements (and
the pair generating it) in the corresponding distance vector. The row
index *j* should be greater than *i+1*, corresponding to the upper
triangle of the distance matrix.
Parameters
----------
i : int
row index (starting at 0) of the distance matrix
j : int
column index (starting at 0) of the distance matrix
Returns
-------
int
(matrix element) index in the corresponding distance vector
"""
        return (self.npaths*i) + j - (i+2)*(i+1)//2
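    # Worked example of the mapping (illustrative): for ``npaths = 4`` the upper
    # triangle maps to the distance vector as
    #   (0,1)->0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5,
    # e.g. for (i, j) = (1, 3):  s = 4*1 + 3 - (1+2)*(1+1)//2 = 4.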
def compute_nearest_neighbors(self, P,Q, N=None):
"""Generates Hausdorff nearest neighbor lists of *frames* (by index) and
*distances* for *this* pair of paths corresponding to distance matrix
indices (*i*,*j*).
:meth:`PSAPair.compute_nearest_neighbors` calls
:func:`hausdorff_neighbors` to populate the dictionary of the nearest
neighbor lists of frames (by index) and distances
(:attr:`PSAPair.nearest_neighbors`). This method must explicitly take as
arguments a pair of paths, *P* and *Q*, where *P* is the
:math:`i^\text{th}` path and *Q* is the :math:`j^\text{th}` path among
the set of *N* total paths in the comparison.
Parameters
----------
P : numpy.ndarray
representing a path
Q : numpy.ndarray
representing a path
N : int
            size of the distance matrix (of shape *N*-by-*N*); currently unused [``None``]
"""
hn = hausdorff_neighbors(P, Q)
self.nearest_neighbors['frames'] = hn['frames']
self.nearest_neighbors['distances'] = hn['distances']
def find_hausdorff_pair(self):
r"""Find the Hausdorff pair (of frames) for *this* pair of paths.
:meth:`PSAPair.find_hausdorff_pair` requires that
        :meth:`PSAPair.compute_nearest_neighbors` be called first to
generate the nearest neighbors (and corresponding distances) for each
path in *this* :class:`PSAPair`. The Hausdorff pair is the nearest
neighbor pair (of snapshots/frames), one in the first path and one in
the second, with the largest separation distance.
"""
if self.nearest_neighbors['distances'] is None:
raise NoDataError("Nearest neighbors have not been calculated yet;"
" run compute_nearest_neighbors() first.")
nn_idx_P, nn_idx_Q = self.nearest_neighbors['frames']
nn_dist_P, nn_dist_Q = self.nearest_neighbors['distances']
max_nn_dist_P = max(nn_dist_P)
max_nn_dist_Q = max(nn_dist_Q)
if max_nn_dist_P > max_nn_dist_Q:
max_nn_idx_P = np.argmax(nn_dist_P)
self.hausdorff_pair['frames'] = max_nn_idx_P, nn_idx_P[max_nn_idx_P]
self.hausdorff_pair['distance'] = max_nn_dist_P
else:
max_nn_idx_Q = np.argmax(nn_dist_Q)
self.hausdorff_pair['frames'] = nn_idx_Q[max_nn_idx_Q], max_nn_idx_Q
self.hausdorff_pair['distance'] = max_nn_dist_Q
def get_nearest_neighbors(self, frames=True, distances=True):
"""Returns the nearest neighbor frame indices, distances, or both, for
each path in *this* :class:`PSAPair`.
:meth:`PSAPair.get_nearest_neighbors` requires that the nearest
neighbors (:attr:`nearest_neighbors`) be initially computed by first
calling :meth:`compute_nearest_neighbors`. At least one of *frames*
or *distances* must be ``True``, or else a ``NoDataError`` is raised.
Parameters
----------
frames : bool
if ``True``, return nearest neighbor frame indices
[``True``]
distances : bool
if ``True``, return nearest neighbor distances [``True``]
Returns
-------
dict or tuple
If both *frames* and *distances* are ``True``, return the entire
dictionary (:attr:`nearest_neighbors`); if only *frames* is
``True``, return a pair of :class:`numpy.ndarray` containing the
indices of the frames (for the pair of paths) of the nearest
neighbors; if only *distances* is ``True``, return a pair of
:class:`numpy.ndarray` of the nearest neighbor distances (for the
pair of paths).
"""
if self.nearest_neighbors['distances'] is None:
raise NoDataError("Nearest neighbors have not been calculated yet;"
" run compute_nearest_neighbors() first.")
if frames:
if distances:
return self.nearest_neighbors
else:
return self.nearest_neighbors['frames']
elif distances:
return self.nearest_neighbors['distances']
else:
raise NoDataError('Need to select Hausdorff pair "frames" or'
' "distances" or both. "frames" and "distances"'
' cannot both be set to False.')
def get_hausdorff_pair(self, frames=True, distance=True):
"""Returns the Hausdorff pair of frames indices, the Hausdorff distance,
or both, for the paths in *this* :class:`PSAPair`.
:meth:`PSAPair.get_hausdorff_pair` requires that the Hausdorff pair
(and distance) be initially found by first calling
:meth:`find_hausdorff_pair`. At least one of *frames* or *distance*
must be ``True``, or else a ``NoDataError`` is raised.
Parameters
----------
frames : bool
if ``True``, return the indices of the frames
of the Hausdorff pair [``True``]
distances : bool
if ``True``, return Hausdorff distance [``True``]
Returns
-------
dict or tuple
If both *frames* and *distance* are ``True``, return the entire
dictionary (:attr:`hausdorff_pair`); if only *frames* is
``True``, return a pair of ``int`` containing the indices of the
frames (one index per path) of the Hausdorff pair; if only *distance*
is ``True``, return the Hausdorff distance for this path pair.
"""
if self.hausdorff_pair['distance'] is None:
raise NoDataError("Hausdorff pair has not been calculated yet;"
" run find_hausdorff_pair() first.")
if frames:
if distance:
return self.hausdorff_pair
else:
return self.hausdorff_pair['frames']
elif distance:
return self.hausdorff_pair['distance']
else:
raise NoDataError('Need to select Hausdorff pair "frames" or'
' "distance" or both. "frames" and "distance"'
' cannot both be set to False.')
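# Illustrative sketch of using :class:`PSAPair` directly (assumes a
# :class:`PSAnalysis` instance ``psa`` whose paths have already been generated;
# normally :meth:`PSAnalysis.run_pairs_analysis` does this for you):
#
#   pp = PSAPair(len(psa.paths), 0, 1)                   # compare paths 0 and 1
#   pp.compute_nearest_neighbors(psa.paths[0], psa.paths[1])
#   pp.find_hausdorff_pair()
#   frames = pp.hausdorff_pair['frames']                 # (frame in path 0, frame in path 1)
#   d = pp.hausdorff_pair['distance']                    # Hausdorff distance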
class PSAnalysis(object):
"""Perform Path Similarity Analysis (PSA) on a set of trajectories.
The analysis is performed with :meth:`PSAnalysis.run` and stores the result
in the :class:`numpy.ndarray` distance matrix :attr:`PSAnalysis.D`.
:meth:`PSAnalysis.run` also generates a fitted trajectory and path from
alignment of the original trajectories to a reference structure.
.. versionadded:: 0.8
.. versionchanged:: 1.0.0
``save_result()`` method has been removed. You can use ``np.save()`` on
:attr:`PSAnalysis.D` instead.
"""
def __init__(self, universes, reference=None, select='name CA',
ref_frame=0, path_select=None, labels=None,
targetdir=os.path.curdir):
"""Setting up Path Similarity Analysis.
The mutual similarity between all unique pairs of trajectories
are computed using a selected path metric.
Parameters
----------
universes : list
a list of universes (:class:`MDAnalysis.Universe` object), each
containing a trajectory
reference : Universe
reference coordinates; :class:`MDAnalysis.Universe` object; if
``None`` the first time step of the first item in `universes` is used
[``None``]
select : str or dict or tuple
The selection to operate on; can be one of:
1. any valid selection string for
:meth:`~MDAnalysis.core.groups.AtomGroup.select_atoms` that
produces identical selections in *mobile* and *reference*; or
2. a dictionary ``{'mobile':sel1, 'reference':sel2}`` (the
:func:`MDAnalysis.analysis.align.fasta2select` function returns
such a dictionary based on a ClustalW_ or STAMP_ sequence
alignment); or
3. a tuple ``(sel1, sel2)``
When using 2. or 3. with *sel1* and *sel2* then these selections
can also each be a list of selection strings (to generate an
AtomGroup with defined atom order as described under
:ref:`ordered-selections-label`).
ref_frame : int
frame index to select frame from *reference* [0]
path_select : str
atom selection composing coordinates of (fitted) path; if ``None``
then *path_select* is set to *select* [``None``]
targetdir : str
output files are saved there; if ``None`` then "./psadata" is
created and used [.]
labels : list
list of strings, names of trajectories to be analyzed
(:class:`MDAnalysis.Universe`); if ``None``, defaults to trajectory
names [``None``]
.. _ClustalW: http://www.clustal.org/
.. _STAMP: http://www.compbio.dundee.ac.uk/manuals/stamp.4.2/
"""
self.universes = universes
self.u_reference = self.universes[0] if reference is None else reference
self.select = select
self.ref_frame = ref_frame
self.path_select = self.select if path_select is None else path_select
if targetdir is None:
try:
targetdir = os.path.join(os.path.curdir, 'psadata')
os.makedirs(targetdir)
except OSError:
if not os.path.isdir(targetdir):
raise
self.targetdir = os.path.realpath(targetdir)
# Set default directory names for storing topology/reference structures,
# fitted trajectories, paths, distance matrices, and plots
self.datadirs = {'fitted_trajs' : 'fitted_trajs',
'paths' : 'paths',
'distance_matrices' : 'distance_matrices',
'plots' : 'plots'}
for dir_name, directory in self.datadirs.items():
try:
full_dir_name = os.path.join(self.targetdir, dir_name)
os.makedirs(full_dir_name)
except OSError:
if not os.path.isdir(full_dir_name):
raise
# Keep track of topology, trajectory, and related files
trj_names = []
for i, u in enumerate(self.universes):
head, tail = os.path.split(u.trajectory.filename)
filename, ext = os.path.splitext(tail)
trj_names.append(filename)
self.trj_names = trj_names
self.fit_trj_names = None
self.path_names = None
self.top_name = self.universes[0].filename if len(universes) != 0 else None
self.labels = labels or self.trj_names
# Names of persistence (pickle) files where topology and trajectory
# filenames are stored--should not be modified by user
self._top_pkl = os.path.join(self.targetdir, "psa_top-name.pkl")
self._trjs_pkl = os.path.join(self.targetdir, "psa_orig-traj-names.pkl")
self._fit_trjs_pkl = os.path.join(self.targetdir, "psa_fitted-traj-names.pkl")
self._paths_pkl = os.path.join(self.targetdir, "psa_path-names.pkl")
self._labels_pkl = os.path.join(self.targetdir, "psa_labels.pkl")
# Pickle topology and trajectory filenames for this analysis to curdir
with open(self._top_pkl, 'wb') as output:
pickle.dump(self.top_name, output)
with open(self._trjs_pkl, 'wb') as output:
pickle.dump(self.trj_names, output)
with open(self._labels_pkl, 'wb') as output:
pickle.dump(self.labels, output)
self.natoms = None
self.npaths = None
self.paths = None
self.D = None # pairwise distances
self._HP = None # (distance vector order) list of all Hausdorff pairs
self._NN = None # (distance vector order) list of all nearest neighbors
self._psa_pairs = None # (distance vector order) list of all PSAPairs
def generate_paths(self, align=False, filename=None, infix='', weights=None,
                       tol_mass=0.1, ref_frame=None, flat=False, save=True, store=False):
"""Generate paths, aligning each to reference structure if necessary.
Parameters
----------
align : bool
Align trajectories to atom selection :attr:`PSAnalysis.select`
of :attr:`PSAnalysis.u_reference` [``False``]
filename : str
            string representing the base filename for fitted trajectories and
paths [``None``]
infix : str
additional tag string that is inserted into the output filename of
the fitted trajectory files ['']
weights : {"mass", ``None``} or array_like (optional)
choose weights. With ``"mass"`` uses masses as weights; with
``None`` weigh each atom equally. If a float array of the same
length as the selected AtomGroup is provided, use each element of
the `array_like` as a weight for the corresponding atom in the
AtomGroup [``None``]
tol_mass : float
Reject match if the atomic masses for matched atoms differ by more
            than *tol_mass* [``0.1``]
ref_frame : int
frame index to select frame from *reference* [``None``]
flat : bool
represent :attr:`Path.path` as a 2D (|2D|) :class:`numpy.ndarray`;
if ``False`` then :attr:`Path.path` is a 3D (|3D|)
:class:`numpy.ndarray` [``False``]
save : bool
if ``True``, pickle list of names for fitted trajectories
[``True``]
store : bool
if ``True`` then writes each path (:class:`numpy.ndarray`)
in :attr:`PSAnalysis.paths` to compressed npz (numpy) files
[``False``]
The fitted trajectories are written to new files in the
"/trj_fit" subdirectory in :attr:`PSAnalysis.targetdir` named
"filename(*trajectory*)XXX*infix*_psa", where "XXX" is a number between
000 and 999; the extension of each file is the same as its original.
Optionally, the trajectories can also be saved in numpy compressed npz
format in the "/paths" subdirectory in :attr:`PSAnalysis.targetdir` for
persistence and can be accessed as the attribute
:attr:`PSAnalysis.paths`.
.. deprecated:: 0.16.1
Instead of ``mass_weighted=True`` use new ``weights='mass'``;
refactored to fit with AnalysisBase API
.. versionchanged:: 0.17.0
Deprecated keyword `mass_weighted` was removed.
.. versionchanged:: 1.0.0
Defaults for the `store` and `filename` keywords have been changed
from `True` and `fitted` to `False` and `None` respectively. These
now match the docstring documented defaults.
"""
if ref_frame is None:
ref_frame = self.ref_frame
paths = []
fit_trj_names = []
for i, u in enumerate(self.universes):
p = Path(u, self.u_reference, select=self.select,
path_select=self.path_select, ref_frame=ref_frame)
trj_dir = os.path.join(self.targetdir, self.datadirs['fitted_trajs'])
postfix = '{0}{1}{2:03n}'.format(infix, '_psa', i+1)
top_name, fit_trj_name = p.run(align=align, filename=filename,
postfix=postfix,
targetdir=trj_dir,
weights=weights,
tol_mass=tol_mass, flat=flat)
paths.append(p.path)
fit_trj_names.append(fit_trj_name)
self.natoms, axis = get_coord_axes(paths[0])
self.paths = paths
self.npaths = len(paths)
self.fit_trj_names = fit_trj_names
if save:
with open(self._fit_trjs_pkl, 'wb') as output:
pickle.dump(self.fit_trj_names, output)
if store:
self.save_paths(filename=filename)
def run(self, **kwargs):
"""Perform path similarity analysis on the trajectories to compute
the distance matrix.
A number of parameters can be changed from the defaults. The
result is stored as the array :attr:`PSAnalysis.D`.
Parameters
----------
metric : str or callable
selection string specifying the path metric to measure pairwise
distances among :attr:`PSAnalysis.paths` or a callable with the
same call signature as :func:`hausdorff`
[``'hausdorff'``]
start : int
`start` and `stop` frame index with `step` size: analyze
``trajectory[start:stop:step]`` [``None``]
stop : int
step : int
.. versionchanged:: 1.0.0
`store` and `filename` have been removed.
"""
metric = kwargs.pop('metric', 'hausdorff')
start = kwargs.pop('start', None)
stop = kwargs.pop('stop', None)
step = kwargs.pop('step', None)
if isinstance(metric, str):
metric_func = get_path_metric_func(str(metric))
else:
metric_func = metric
numpaths = self.npaths
D = np.zeros((numpaths,numpaths))
for i in range(0, numpaths-1):
for j in range(i+1, numpaths):
P = self.paths[i][start:stop:step]
Q = self.paths[j][start:stop:step]
D[i,j] = metric_func(P, Q)
D[j,i] = D[i,j]
self.D = D
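    # Example of a user-supplied metric (illustrative sketch): any callable with
    # the signature ``metric(P, Q) -> float`` can be passed instead of the name of
    # a built-in metric.  The averaging below is an arbitrary choice for the sake
    # of the example, not a recommended metric; ``psa`` is an assumed
    # :class:`PSAnalysis` instance.
    #
    #   def mean_nn_metric(P, Q):
    #       hn = hausdorff_neighbors(P, Q)
    #       return 0.5 * (np.mean(hn['distances'][0]) + np.mean(hn['distances'][1]))
    #
    #   psa.run(metric=mean_nn_metric)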
def run_pairs_analysis(self, **kwargs):
"""Perform PSA Hausdorff (nearest neighbor) pairs analysis on all unique
pairs of paths in :attr:`PSAnalysis.paths`.
Partial results can be stored in separate lists, where each list is
indexed according to distance vector convention (i.e., element *(i,j)*
in distance matrix representation corresponds to element
        :math:`s=N*i+j-(i+1)*(i+2)/2` in distance vector representation, which is
the :math:`s^\text{th}` comparison). For each unique pair of paths, the
nearest neighbors for that pair can be stored in :attr:`NN` and the
Hausdorff pair in :attr:`HP`. :attr:`PP` stores the full information
of Hausdorff pairs analysis that is available for each pair of path,
including nearest neighbors lists and the Hausdorff pairs.
The pairwise distances are stored as the array :attr:`PSAnalysis.D`.
Parameters
----------
start : int
`start` and `stop` frame index with `step` size: analyze
``trajectory[start:stop:step]`` [``None``]
stop : int
step : int
neighbors : bool
if ``True``, then stores dictionary of nearest neighbor
frames/distances in :attr:`PSAnalysis.NN` [``False``]
hausdorff_pairs : bool
if ``True``, then stores dictionary of Hausdorff pair
frames/distances in :attr:`PSAnalysis.HP` [``False``]
"""
start = kwargs.pop('start', None)
stop = kwargs.pop('stop', None)
step = kwargs.pop('step', None)
neighbors = kwargs.pop('neighbors', False)
hausdorff_pairs = kwargs.pop('hausdorff_pairs', False)
numpaths = self.npaths
D = np.zeros((numpaths,numpaths))
self._NN = [] # list of nearest neighbors pairs
self._HP = [] # list of Hausdorff pairs
self._psa_pairs = [] # list of PSAPairs
for i in range(0, numpaths-1):
for j in range(i+1, numpaths):
                pp = PSAPair(numpaths, i, j)
P = self.paths[i][start:stop:step]
Q = self.paths[j][start:stop:step]
pp.compute_nearest_neighbors(P, Q, self.natoms)
pp.find_hausdorff_pair()
D[i,j] = pp.hausdorff_pair['distance']
D[j,i] = D[i,j]
self._psa_pairs.append(pp)
if neighbors:
self._NN.append(pp.get_nearest_neighbors())
if hausdorff_pairs:
self._HP.append(pp.get_hausdorff_pair())
self.D = D
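    # Typical workflow (illustrative sketch; ``universes``, ``ref`` and ``names``
    # are assumed to exist in the calling code):
    #
    #   psa = PSAnalysis(universes, reference=ref, select='name CA', labels=names)
    #   psa.generate_paths(align=True, save=False)
    #   psa.run_pairs_analysis(neighbors=True, hausdorff_pairs=True)
    #   psa.D                        # Hausdorff distance matrix (npaths x npaths)
    #   psa.hausdorff_pairs[0]       # {'frames': (f_i, f_j), 'distance': d} for pair 0
    #   psa.nearest_neighbors[0]     # nearest neighbor frames/distances for pair 0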
def save_paths(self, filename=None):
"""Save fitted :attr:`PSAnalysis.paths` to numpy compressed npz files.
The data are saved with :func:`numpy.savez_compressed` in the directory
specified by :attr:`PSAnalysis.targetdir`.
Parameters
----------
filename : str
specifies filename [``None``]
Returns
-------
filename : str
See Also
--------
load
"""
filename = filename or 'path_psa'
head = os.path.join(self.targetdir, self.datadirs['paths'])
outfile = os.path.join(head, filename)
if self.paths is None:
raise NoDataError("Paths have not been calculated yet")
path_names = []
for i, path in enumerate(self.paths):
current_outfile = "{0}{1:03n}.npy".format(outfile, i+1)
np.save(current_outfile, self.paths[i])
path_names.append(current_outfile)
logger.info("Wrote path to file %r", current_outfile)
self.path_names = path_names
with open(self._paths_pkl, 'wb') as output:
pickle.dump(self.path_names, output)
return filename
def load(self):
"""Load fitted paths specified by 'psa_path-names.pkl' in
:attr:`PSAnalysis.targetdir`.
All filenames are determined by :class:`PSAnalysis`.
See Also
--------
save_paths
"""
if not os.path.exists(self._paths_pkl):
raise NoDataError("Fitted trajectories cannot be loaded; save file" +
"{0} does not exist.".format(self._paths_pkl))
self.path_names = np.load(self._paths_pkl, allow_pickle=True)
self.paths = [np.load(pname) for pname in self.path_names]
if os.path.exists(self._labels_pkl):
self.labels = np.load(self._labels_pkl, allow_pickle=True)
logger.info("Loaded paths from %r", self._paths_pkl)
def plot(self, filename=None, linkage='ward', count_sort=False,
distance_sort=False, figsize=4.5, labelsize=12):
"""Plot a clustered distance matrix.
        Uses method *linkage* and plots the corresponding dendrogram. Rows
(and columns) are identified using the list of strings specified by
:attr:`PSAnalysis.labels`.
If `filename` is supplied then the figure is also written to file (the
suffix determines the file type, e.g. pdf, png, eps, ...). All other
keyword arguments are passed on to :func:`matplotlib.pyplot.matshow`.
Parameters
----------
filename : str
save figure to *filename* [``None``]
linkage : str
name of linkage criterion for clustering [``'ward'``]
count_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
distance_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
figsize : float
set the vertical size of plot in inches [``4.5``]
labelsize : float
set the font size for colorbar labels; font size for path labels on
dendrogram default to 3 points smaller [``12``]
Returns
-------
Z
`Z` from :meth:`cluster`
dgram
`dgram` from :meth:`cluster`
dist_matrix_clus
clustered distance matrix (reordered)
.. versionchanged:: 1.0.0
:attr:`tick1On`, :attr:`tick2On`, :attr:`label1On` and :attr:`label2On`
changed to :attr:`tick1line`, :attr:`tick2line`, :attr:`label1` and
:attr:`label2` due to upstream deprecation (see #2493)
"""
from matplotlib.pyplot import figure, colorbar, cm, savefig, clf
if self.D is None:
raise ValueError(
"No distance data; do 'PSAnalysis.run()' first.")
npaths = len(self.D)
dist_matrix = self.D
dgram_loc, hmap_loc, cbar_loc = self._get_plot_obj_locs()
aspect_ratio = 1.25
clf()
fig = figure(figsize=(figsize*aspect_ratio, figsize))
ax_hmap = fig.add_axes(hmap_loc)
ax_dgram = fig.add_axes(dgram_loc)
Z, dgram = self.cluster(method=linkage, \
count_sort=count_sort, \
distance_sort=distance_sort)
rowidx = colidx = dgram['leaves'] # get row-wise ordering from clustering
        ax_dgram.invert_yaxis()  # Place origin at upper left (from lower left)
minDist, maxDist = 0, np.max(dist_matrix)
dist_matrix_clus = dist_matrix[rowidx,:]
dist_matrix_clus = dist_matrix_clus[:,colidx]
im = ax_hmap.matshow(dist_matrix_clus, aspect='auto', origin='lower', \
cmap=cm.YlGn, vmin=minDist, vmax=maxDist)
ax_hmap.invert_yaxis() # Place origin at upper left (from lower left)
ax_hmap.locator_params(nbins=npaths)
ax_hmap.set_xticks(np.arange(npaths), minor=True)
ax_hmap.set_yticks(np.arange(npaths), minor=True)
ax_hmap.tick_params(axis='x', which='both', labelleft='off', \
labelright='off', labeltop='on', labelsize=0)
ax_hmap.tick_params(axis='y', which='both', labelleft='on', \
labelright='off', labeltop='off', labelsize=0)
rowlabels = [self.labels[i] for i in rowidx]
collabels = [self.labels[i] for i in colidx]
ax_hmap.set_xticklabels(collabels, rotation='vertical', \
size=(labelsize-4), multialignment='center', minor=True)
ax_hmap.set_yticklabels(rowlabels, rotation='horizontal', \
size=(labelsize-4), multialignment='left', ha='right', \
minor=True)
ax_color = fig.add_axes(cbar_loc)
colorbar(im, cax=ax_color, ticks=np.linspace(minDist, maxDist, 10), \
format="%0.1f")
ax_color.tick_params(labelsize=labelsize)
# Remove major ticks and labels from both heat map axes
for tic in ax_hmap.xaxis.get_major_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
tic.label1.set_visible(False)
tic.label2.set_visible(False)
for tic in ax_hmap.yaxis.get_major_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
tic.label1.set_visible(False)
tic.label2.set_visible(False)
# Remove minor ticks from both heat map axes
for tic in ax_hmap.xaxis.get_minor_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
for tic in ax_hmap.yaxis.get_minor_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
# Remove tickmarks from colorbar
for tic in ax_color.yaxis.get_major_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
if filename is not None:
head = os.path.join(self.targetdir, self.datadirs['plots'])
outfile = os.path.join(head, filename)
savefig(outfile, dpi=300, bbox_inches='tight')
return Z, dgram, dist_matrix_clus
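    # Example (illustrative; the output file name is arbitrary):
    #
    #   Z, dgram, D_clus = psa.plot(filename='psa_hausdorff.pdf', linkage='ward')
    #
    # writes the heat map/dendrogram to <targetdir>/plots/psa_hausdorff.pdf and
    # returns the linkage, dendrogram and the re-ordered distance matrix.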
def plot_annotated_heatmap(self, filename=None, linkage='ward', \
count_sort=False, distance_sort=False, \
figsize=8, annot_size=6.5):
"""Plot a clustered distance matrix.
Uses method `linkage` and plots annotated distances in the matrix. Rows
(and columns) are identified using the list of strings specified by
:attr:`PSAnalysis.labels`.
If `filename` is supplied then the figure is also written to file (the
suffix determines the file type, e.g. pdf, png, eps, ...). All other
keyword arguments are passed on to :func:`matplotlib.pyplot.imshow`.
Parameters
----------
filename : str
save figure to *filename* [``None``]
linkage : str
name of linkage criterion for clustering [``'ward'``]
count_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
distance_sort : bool
see :func:`scipy.cluster.hierarchy.dendrogram` [``False``]
figsize : float
            set the vertical size of plot in inches [``8``]
annot_size : float
font size of annotation labels on heat map [``6.5``]
Returns
-------
Z
`Z` from :meth:`cluster`
dgram
`dgram` from :meth:`cluster`
dist_matrix_clus
clustered distance matrix (reordered)
Note
----
This function requires the seaborn_ package, which can be installed
with `pip install seaborn` or `conda install seaborn`.
.. _seaborn: https://seaborn.pydata.org/
.. versionchanged:: 1.0.0
:attr:`tick1On`, :attr:`tick2On`, :attr:`label1On` and :attr:`label2On`
changed to :attr:`tick1line`, :attr:`tick2line`, :attr:`label1` and
:attr:`label2` due to upstream deprecation (see #2493)
"""
from matplotlib.pyplot import figure, colorbar, cm, savefig, clf
try:
import seaborn as sns
except ImportError:
raise ImportError(
"""ERROR --- The seaborn package cannot be found!
The seaborn API could not be imported. Please install it first.
You can try installing with pip directly from the
internet:
pip install seaborn
Alternatively, download the package from
http://pypi.python.org/pypi/seaborn/
and install in the usual manner.
"""
) from None
if self.D is None:
raise ValueError(
"No distance data; do 'PSAnalysis.run()' first.")
dist_matrix = self.D
Z, dgram = self.cluster(method=linkage, \
count_sort=count_sort, \
distance_sort=distance_sort, \
no_plot=True)
rowidx = colidx = dgram['leaves'] # get row-wise ordering from clustering
dist_matrix_clus = dist_matrix[rowidx,:]
dist_matrix_clus = dist_matrix_clus[:,colidx]
clf()
aspect_ratio = 1.25
fig = figure(figsize=(figsize*aspect_ratio, figsize))
ax_hmap = fig.add_subplot(111)
ax_hmap = sns.heatmap(dist_matrix_clus, \
linewidths=0.25, cmap=cm.YlGn, annot=True, fmt='3.1f', \
square=True, xticklabels=rowidx, yticklabels=colidx, \
annot_kws={"size": 7}, ax=ax_hmap)
# Remove major ticks from both heat map axes
for tic in ax_hmap.xaxis.get_major_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
tic.label1.set_visible(False)
tic.label2.set_visible(False)
for tic in ax_hmap.yaxis.get_major_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
tic.label1.set_visible(False)
tic.label2.set_visible(False)
# Remove minor ticks from both heat map axes
for tic in ax_hmap.xaxis.get_minor_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
for tic in ax_hmap.yaxis.get_minor_ticks():
tic.tick1line.set_visible(False)
tic.tick2line.set_visible(False)
if filename is not None:
head = os.path.join(self.targetdir, self.datadirs['plots'])
outfile = os.path.join(head, filename)
savefig(outfile, dpi=600, bbox_inches='tight')
return Z, dgram, dist_matrix_clus
def plot_nearest_neighbors(self, filename=None, idx=0, \
labels=('Path 1', 'Path 2'), figsize=4.5, \
multiplot=False, aspect_ratio=1.75, \
labelsize=12):
"""Plot nearest neighbor distances as a function of normalized frame
number.
The frame number is mapped to the interval *[0, 1]*.
If `filename` is supplied then the figure is also written to file (the
suffix determines the file type, e.g. pdf, png, eps, ...). All other
keyword arguments are passed on to :func:`matplotlib.pyplot.imshow`.
Parameters
----------
filename : str
save figure to *filename* [``None``]
idx : int
index of path (pair) comparison to plot [``0``]
labels : (str, str)
pair of names to label nearest neighbor distance
curves [``('Path 1', 'Path 2')``]
figsize : float
set the vertical size of plot in inches [``4.5``]
multiplot : bool
set to ``True`` to enable plotting multiple nearest
neighbor distances on the same figure [``False``]
aspect_ratio : float
set the ratio of width to height of the plot [``1.75``]
labelsize : float
            set the font size for the axis labels and tick labels [``12``]
Returns
-------
ax : axes
Note
----
This function requires the seaborn_ package, which can be installed
with `pip install seaborn` or `conda install seaborn`.
.. _seaborn: https://seaborn.pydata.org/
"""
from matplotlib.pyplot import figure, savefig, tight_layout, clf, show
try:
import seaborn as sns
except ImportError:
raise ImportError(
"""ERROR --- The seaborn package cannot be found!
The seaborn API could not be imported. Please install it first.
You can try installing with pip directly from the
internet:
pip install seaborn
Alternatively, download the package from
http://pypi.python.org/pypi/seaborn/
and install in the usual manner.
"""
) from None
colors = sns.xkcd_palette(["cherry", "windows blue"])
if self._NN is None:
raise ValueError("No nearest neighbor data; run "
"'PSAnalysis.run_pairs_analysis(neighbors=True)' first.")
sns.set_style('whitegrid')
if not multiplot:
clf()
fig = figure(figsize=(figsize*aspect_ratio, figsize))
ax = fig.add_subplot(111)
nn_dist_P, nn_dist_Q = self._NN[idx]['distances']
frames_P = len(nn_dist_P)
frames_Q = len(nn_dist_Q)
progress_P = np.asarray(range(frames_P))/(1.0*frames_P)
progress_Q = np.asarray(range(frames_Q))/(1.0*frames_Q)
ax.plot(progress_P, nn_dist_P, color=colors[0], lw=1.5, label=labels[0])
ax.plot(progress_Q, nn_dist_Q, color=colors[1], lw=1.5, label=labels[1])
ax.legend()
        ax.set_xlabel(r'(normalized) progress by frame number', fontsize=labelsize)
        ax.set_ylabel(r'nearest neighbor rmsd ($\AA$)', fontsize=labelsize)
        ax.tick_params(axis='both', which='major', labelsize=labelsize, pad=4)
sns.despine(bottom=True, left=True, ax=ax)
tight_layout()
if filename is not None:
head = os.path.join(self.targetdir, self.datadirs['plots'])
outfile = os.path.join(head, filename)
savefig(outfile, dpi=300, bbox_inches='tight')
return ax
def cluster(self, dist_mat=None, method='ward', count_sort=False, \
distance_sort=False, no_plot=False, no_labels=True, \
color_threshold=4):
"""Cluster trajectories and optionally plot the dendrogram.
This method is used by :meth:`PSAnalysis.plot` to generate a heatmap-
dendrogram combination plot. By default, the distance matrix,
:attr:`PSAnalysis.D`, is assumed to exist, converted to
distance-vector form, and inputted to :func:`cluster.hierarchy.linkage`
to generate a clustering. For convenience in plotting arbitrary
distance matrices, one can also be specify `dist_mat`, which will be
checked for proper distance matrix form by
:func:`spatial.distance.squareform`
Parameters
----------
dist_mat : numpy.ndarray
user-specified distance matrix to be clustered [``None``]
method : str
name of linkage criterion for clustering [``'ward'``]
no_plot : bool
if ``True``, do not render the dendrogram [``False``]
no_labels : bool
if ``True`` then do not label dendrogram [``True``]
color_threshold : float
For brevity, let t be the color_threshold. Colors all the
descendent links below a cluster node k the same color if k is
the first node below the cut threshold t. All links connecting
nodes with distances greater than or equal to the threshold are
colored blue. If t is less than or equal to zero, all nodes are
            colored blue. If color_threshold is None or 'default',
            corresponding with MATLAB(TM) behavior, the threshold is set to
            0.7*max(Z[:,2]). [``4``]
Returns
-------
Z
output from :func:`scipy.cluster.hierarchy.linkage`;
            the hierarchical clustering encoded as a linkage matrix
dgram
output from :func:`scipy.cluster.hierarchy.dendrogram`
"""
# perhaps there is a better way to manipulate the plot... or perhaps it
# is not even necessary? In any case, the try/finally makes sure that
# we are not permanently changing the user's global state
orig_linewidth = matplotlib.rcParams['lines.linewidth']
matplotlib.rcParams['lines.linewidth'] = 0.5
try:
            if dist_mat is not None:
dist_vec = spatial.distance.squareform(dist_mat,
force='tovector',
checks=True)
else:
dist_vec = self.get_pairwise_distances(vectorform=True)
Z = cluster.hierarchy.linkage(dist_vec, method=method)
dgram = cluster.hierarchy.dendrogram(
Z, no_labels=no_labels, orientation='left',
count_sort=count_sort, distance_sort=distance_sort,
no_plot=no_plot, color_threshold=color_threshold)
finally:
matplotlib.rcParams['lines.linewidth'] = orig_linewidth
return Z, dgram
def _get_plot_obj_locs(self):
"""Find and return coordinates for dendrogram, heat map, and colorbar.
Returns
-------
tuple
tuple of coordinates for placing the dendrogram, heat map, and
colorbar in the plot.
"""
plot_xstart = 0.04
plot_ystart = 0.04
label_margin = 0.155
dgram_height = 0.2 # dendrogram heights(s)
hmap_xstart = plot_xstart + dgram_height + label_margin
# Set locations for dendrogram(s), matrix, and colorbar
hmap_height = 0.8
hmap_width = 0.6
dgram_loc = [plot_xstart, plot_ystart, dgram_height, hmap_height]
cbar_width = 0.02
cbar_xstart = hmap_xstart + hmap_width + 0.01
cbar_loc = [cbar_xstart, plot_ystart, cbar_width, hmap_height]
hmap_loc = [hmap_xstart, plot_ystart, hmap_width, hmap_height]
return dgram_loc, hmap_loc, cbar_loc
def get_num_atoms(self):
"""Return the number of atoms used to construct the :class:`Path` instances in
:class:`PSA`.
Returns
-------
int
the number of atoms in any path
Note
----
Must run :meth:`PSAnalysis.generate_paths` prior to calling this
method.
"""
if self.natoms is None:
raise ValueError(
"No path data; do 'PSAnalysis.generate_paths()' first.")
return self.natoms
def get_num_paths(self):
"""Return the number of paths in :class:`PSA`.
Note
----
Must run :meth:`PSAnalysis.generate_paths` prior to calling this method.
Returns
-------
int
the number of paths in :class:`PSA`
"""
if self.npaths is None:
raise ValueError(
"No path data; do 'PSAnalysis.generate_paths()' first.")
return self.npaths
def get_paths(self):
"""Return the paths in :class:`PSA`.
Note
----
Must run :meth:`PSAnalysis.generate_paths` prior to calling this
method.
Returns
-------
list
list of :class:`numpy.ndarray` representations of paths in
:class:`PSA`
"""
if self.paths is None:
raise ValueError(
"No path data; do 'PSAnalysis.generate_paths()' first.")
return self.paths
def get_pairwise_distances(self, vectorform=False, checks=False):
"""Return the distance matrix (or vector) of pairwise path distances.
Note
----
Must run :meth:`PSAnalysis.run` prior to calling this method.
Parameters
----------
vectorform : bool
if ``True``, return the distance vector instead [``False``]
checks : bool
if ``True``, check that :attr:`PSAnalysis.D` is a proper distance
matrix [``False``]
Returns
-------
numpy.ndarray
representation of the distance matrix (or vector)
"""
if self.D is None:
raise ValueError(
"No distance data; do 'PSAnalysis.run()' first.")
if vectorform:
return spatial.distance.squareform(self.D, force='tovector',
checks=checks)
else:
return self.D
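    # Relationship between the two representations (illustrative; ``psa`` is an
    # assumed :class:`PSAnalysis` instance with a computed distance matrix):
    #
    #   D = psa.get_pairwise_distances()                  # (npaths, npaths) symmetric matrix
    #   v = psa.get_pairwise_distances(vectorform=True)   # length npaths*(npaths-1)//2
    #   np.allclose(spatial.distance.squareform(v), D)    # -> True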
@property
def psa_pairs(self):
"""The list of :class:`PSAPair` instances for each pair of paths.
:attr:`psa_pairs` is a list of all :class:`PSAPair` objects (in
        distance vector order). Each :class:`PSAPair` corresponds to a pair of
        paths that have been compared using
:meth:`PSAnalysis.run_pairs_analysis`. Each :class:`PSAPair` contains
nearest neighbor and Hausdorff pair information specific to a pair of
paths. The nearest neighbor frames and distances for a :class:`PSAPair`
can be accessed in the nearest neighbor dictionary using the keys
'frames' and 'distances', respectively. E.g.,
:attr:`PSAPair.nearest_neighbors['distances']` returns a *pair* of
:class:`numpy.ndarray` corresponding to the nearest neighbor distances
for each path. Similarly, Hausdorff pair information can be accessed
using :attr:`PSAPair.hausdorff_pair` with the keys 'frames' and
'distance'.
Note
----
Must run :meth:`PSAnalysis.run_pairs_analysis` prior to calling this
method.
"""
if self._psa_pairs is None:
raise ValueError("No nearest neighbors data; do"
" 'PSAnalysis.run_pairs_analysis()' first.")
return self._psa_pairs
@property
def hausdorff_pairs(self):
"""The Hausdorff pair for each (unique) pairs of paths.
This attribute contains a list of Hausdorff pair information (in
distance vector order), where each element is a dictionary containing
the pair of frames and the (Hausdorff) distance between a pair of
paths. See :meth:`PSAnalysis.psa_pairs` and
:attr:`PSAPair.hausdorff_pair` for more information about accessing
Hausdorff pair data.
Note
----
Must run :meth:`PSAnalysis.run_pairs_analysis` with
``hausdorff_pairs=True`` prior to calling this method.
"""
if self._HP is None:
raise ValueError("No Hausdorff pairs data; do "
"'PSAnalysis.run_pairs_analysis(hausdorff_pairs=True)' "
"first.")
return self._HP
@property
def nearest_neighbors(self):
"""The nearest neighbors for each (unique) pair of paths.
This attribute contains a list of nearest neighbor information (in
distance vector order), where each element is a dictionary containing
the nearest neighbor frames and distances between a pair of paths. See
:meth:`PSAnalysis.psa_pairs` and :attr:`PSAPair.nearest_neighbors` for
more information about accessing nearest neighbor data.
Note
----
Must run :meth:`PSAnalysis.run_pairs_analysis` with
``neighbors=True`` prior to calling this method.
"""
if self._NN is None:
raise ValueError("No nearest neighbors data; do"
" 'PSAnalysis.run_pairs_analysis(neighbors=True)'"
" first.")
return self._NN
| gpl-2.0 |
msarahan/bokeh | bokeh/charts/builder.py | 3 | 25115 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Builder class, a minimal prototype class to build more chart
types on top of it.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from .attributes import AttrSpec, ColorAttr, CatAttr
from .chart import Chart
from .data_source import ChartDataSource
from .models import CompositeGlyph
from .properties import Dimension, ColumnLabel
from .utils import collect_attribute_columns, label_from_index_dict, build_hover_tooltips
from .data_source import OrderedAssigner
from ..models.ranges import Range, Range1d, FactorRange
from ..models.sources import ColumnDataSource
from ..core.properties import (HasProps, Instance, List, String, Dict,
Color, Bool, Tuple, Either)
from ..io import curdoc, curstate
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def create_and_build(builder_class, *data, **kws):
"""A factory function for handling Chart and Builder generation.
Returns:
:class:`Chart`
"""
if getattr(builder_class, 'dimensions') is None:
raise NotImplementedError('Each builder must specify its dimensions, %s does not.' % builder_class.__name__)
if getattr(builder_class, 'default_attributes') is None:
raise NotImplementedError('Each builder must specify its default_attributes, %s does not.' % builder_class.__name__)
builder_props = set(builder_class.properties())
# append dimensions to the builder props
for dim in builder_class.dimensions:
builder_props.add(dim)
# append attributes to the builder props
for attr_name in builder_class.default_attributes.keys():
builder_props.add(attr_name)
# create the new builder
builder_kws = {k: v for k, v in kws.items() if k in builder_props}
builder = builder_class(*data, **builder_kws)
# create a chart to return, since there isn't one already
chart_kws = { k:v for k,v in kws.items() if k not in builder_props}
chart = Chart(**chart_kws)
chart.add_builder(builder)
chart.start_plot()
curdoc()._current_plot = chart # TODO (havocp) store this on state, not doc?
if curstate().autoadd:
curdoc().add_root(chart)
return chart
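# A typical chart function is a thin wrapper around create_and_build (illustrative
# sketch; ``ScatterBuilder`` is a hypothetical Builder subclass, not defined here):
#
#   def Scatter(data, x=None, y=None, **kws):
#       return create_and_build(ScatterBuilder, data, x=x, y=y, **kws)
#
# which lets users write ``Scatter(df, x='mpg', y='hp', color='origin')`` and get
# back a configured :class:`Chart`.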
class Builder(HasProps):
""" A prototype class to inherit each new chart Builder type.
It provides useful methods to be used by the inherited builder classes,
in order to automate most of the charts creation tasks and leave the
core customization to specialized builder classes. In that pattern
inherited builders just need to provide the following methods:
Required:
* :meth:`~bokeh.charts.builder.Builder.yield_renderers`: yields the glyphs to be
rendered into the plot. Here you should call the
:meth:`~bokeh.charts.builder.Builder.add_glyph` method so that the builder can
setup the legend for you.
* :meth:`~bokeh.charts.builder.Builder.set_ranges`: setup the ranges for the
glyphs. This is called after glyph creation, so you are able to inspect the
comp_glyphs for their minimum and maximum values. See the
:meth:`~bokeh.charts.builder.Builder.create` method for more information on
when this is called and how the builder provides the ranges to the containing
:class:`Chart` using the :meth:`Chart.add_ranges` method.
Optional:
* :meth:`~bokeh.charts.builder.Builder.setup`: provides an area
where subclasses of builder can introspect properties, setup attributes, or change
property values. This is called before
:meth:`~bokeh.charts.builder.Builder.process_data`.
* :meth:`~bokeh.charts.builder.Builder.process_data`: provides an area
where subclasses of builder can manipulate the source data before renderers are
created.
"""
# Optional Inputs
x_range = Instance(Range)
y_range = Instance(Range)
xlabel = String()
ylabel = String()
xscale = String()
yscale = String()
palette = List(Color, help="""Optional input to override the default palette used
by any color attribute.
""")
# Dimension Configuration
"""
The dimension labels that drive the position of the
glyphs. Subclasses should implement this so that the Builder
base class knows which dimensions it needs to operate on.
An example for a builder working with cartesian x and y
coordinates would be dimensions = ['x', 'y']. You should
then instantiate the x and y dimensions as attributes of the
subclass of builder using the :class:`Dimension
<bokeh.charts.properties.Dimension>` class. One for x, as x
= Dimension(...), and one as y = Dimension(...).
"""
dimensions = None # None because it MUST be overridden
"""
The dimension labels that must exist to produce the
glyphs. This specifies what are the valid configurations for
the chart, with the option of specifying the type of the
columns. The
:class:`~bokeh.charts.data_source.ChartDataSource` will
inspect this property of your subclass of Builder and use
this to fill in any required dimensions if no keyword
arguments are used.
"""
req_dimensions = []
# Attribute Configuration
attributes = Dict(String, Instance(AttrSpec), help="""
The attribute specs used to group data. This is a mapping between the role of
the attribute spec (e.g. 'color') and the
:class:`~bokeh.charts.attributes.AttrSpec` class (e.g.,
:class:`~bokeh.charts.attributes.ColorAttr`). The Builder will use this
attributes property during runtime, which will consist of any attribute specs
that are passed into the chart creation function (e.g.,
:class:`~bokeh.charts.Bar`), ones that are created for the user from simple
input types (e.g. `Bar(..., color='red')` or `Bar(..., color=<column_name>)`),
or lastly, the attribute spec found in the default_attributes configured for
the subclass of :class:`~bokeh.charts.builder.Builder`.
""")
"""
The default attribute specs used to group data. This is
where the subclass of Builder should specify what the
default attributes are that will yield attribute values to
each group of data, and any specific configuration. For
example, the :class:`ColorAttr` utilizes a default palette
for assigning color based on groups of data. If the user
doesn't assign a column of the data to the associated
attribute spec, then the default attrspec is used, which
will yield a constant color value for each group of
data. This is by default the first color in the default
palette, but can be customized by setting the default color
in the ColorAttr.
"""
default_attributes = None # None because it MUST be overridden
# Derived properties (created by Builder at runtime)
attribute_columns = List(ColumnLabel, help="""
All columns used for specifying attributes for the Chart. The Builder will set
this value on creation so that the subclasses can know the distinct set of columns
that are being used to assign attributes.
""")
comp_glyphs = List(Instance(CompositeGlyph), help="""
A list of composite glyphs, where each represents a unique subset of data. The
composite glyph is a helper class that encapsulates all low level
:class:`~bokeh.models.glyphs.Glyph`, that represent a higher level group of
data. For example, the :class:`BoxGlyph` is a single class that yields
each :class:`GlyphRenderer` needed to produce a Box on a :class:`BoxPlot`. The
single Box represents a full array of values that are aggregated, and is made
up of multiple :class:`~bokeh.models.glyphs.Rect` and
:class:`~bokeh.models.glyphs.Segment` glyphs.
""")
labels = List(String, help="""Represents the unique labels to be used for legends.""")
"""List of attributes to use for legends."""
label_attributes = []
"""
Used to assign columns to dimensions when no selections have been provided. The
default behavior is provided by the :class:`OrderedAssigner`, which assigns
    a single column to each dimension available in the `Builder`'s `dimensions` property.
"""
column_selector = OrderedAssigner
comp_glyph_types = List(Instance(CompositeGlyph))
sort_dim = Dict(String, Bool, default={})
sort_legend = List(Tuple(String, Bool), help="""
List of tuples to use for sorting the legend, in order that they should be
used for sorting. This sorting can be different than the sorting used for the
rest of the chart. For example, you might want to sort only on the column
assigned to the color attribute, or sort it descending. The order of each tuple
is (Column, Ascending).
""")
source = Instance(ColumnDataSource)
tooltips = Either(List(Tuple(String, String)), List(String), Bool, default=None,
help="""
Tells the builder to add tooltips to the chart by either using the columns
specified to the chart attributes (True), or by generating tooltips for each
column specified (list(str)), or by explicit specification of the tooltips
using the valid input for the `HoverTool` tooltips kwarg.
""")
def __init__(self, *args, **kws):
"""Common arguments to be used by all the inherited classes.
Args:
data (:ref:`userguide_charts_data_types`): source data for the chart
legend (str, bool): the legend of your plot. The legend content is
                inferred from incoming input. It can be ``top_left``,
                ``top_right``, ``bottom_left``, ``bottom_right``.
                It is ``top_right`` if you set it to True.
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
            x_range (obj): x-associated datarange object for your plot,
                initialized as a dummy None.
            y_range (obj): y-associated datarange object for your plot,
initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ChartDataSource for each Builder class.
attr (list(AttrSpec)): to be filled with the new attributes created after
loading the data dict.
"""
data = None
if len(args) != 0 or len(kws) != 0:
# chart dimensions can be literal dimensions or attributes
attrs = list(self.default_attributes.keys())
dims = self.dimensions + attrs
# pop the dimension inputs from kwargs
data_args = {}
for dim in dims:
if dim in kws.keys():
data_args[dim] = kws[dim]
# build chart data source from inputs, given the dimension configuration
data_args['dims'] = tuple(dims)
data_args['required_dims'] = tuple(self.req_dimensions)
data_args['attrs'] = attrs
data_args['column_assigner'] = self.column_selector
data = ChartDataSource.from_data(*args, **data_args)
# make sure that the builder dimensions have access to the chart data source
for dim in self.dimensions:
getattr(getattr(self, dim), 'set_data')(data)
# handle input attrs and ensure attrs have access to data
attributes = self._setup_attrs(data, kws)
# remove inputs handled by dimensions and chart attributes
for dim in dims:
kws.pop(dim, None)
else:
attributes = dict()
kws['attributes'] = attributes
super(Builder, self).__init__(**kws)
# collect unique columns used for attributes
self.attribute_columns = collect_attribute_columns(**self.attributes)
self._data = data
self._legends = []
def _setup_attrs(self, data, kws):
"""Handle overridden attributes and initialize them with data.
Makes sure that all attributes have access to the data
source, which is used for mapping attributes to groups
of data.
Returns:
None
"""
source = ColumnDataSource(data.df)
attr_names = self.default_attributes.keys()
custom_palette = kws.get('palette')
attributes = dict()
for attr_name in attr_names:
attr = kws.pop(attr_name, None)
# if given an attribute use it
if isinstance(attr, AttrSpec):
attributes[attr_name] = attr
# if we are given columns, use those
elif isinstance(attr, str) or isinstance(attr, list):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# override palette if available
if isinstance(attributes[attr_name], ColorAttr):
if custom_palette is not None:
attributes[attr_name].iterable = custom_palette
attributes[attr_name].setup(data=source, columns=attr)
else:
# override palette if available
if (isinstance(self.default_attributes[attr_name], ColorAttr) and
custom_palette is not None):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
attributes[attr_name].iterable = custom_palette
else:
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# make sure all have access to data source
for attr_name in attr_names:
attributes[attr_name].update_data(data=source)
return attributes
def setup(self):
"""Perform any initial pre-processing, attribute config.
Returns:
None
"""
pass
def process_data(self):
"""Make any global data manipulations before grouping.
It has to be implemented by any of the inherited class
representing each different chart type. It is the place
where we make specific calculations for each chart.
Returns:
None
"""
pass
def yield_renderers(self):
""" Generator that yields the glyphs to be draw on the plot
It has to be implemented by any of the inherited class
representing each different chart type.
Yields:
:class:`GlyphRenderer`
"""
        raise NotImplementedError('Subclasses of %s must implement yield_renderers.' %
self.__class__.__name__)
def set_ranges(self):
"""Calculate and set the x and y ranges.
It has to be implemented by any of the subclasses of builder
representing each different chart type, and is called after
:meth:`yield_renderers`.
Returns:
None
"""
        raise NotImplementedError('Subclasses of %s must implement set_ranges.' %
self.__class__.__name__)
def get_dim_extents(self):
"""Helper method to retrieve maximum extents of all the renderers.
Returns:
a dict mapping between dimension and value for x_max, y_max, x_min, y_min
"""
return {'x_max': max([renderer.x_max for renderer in self.comp_glyphs]),
'y_max': max([renderer.y_max for renderer in self.comp_glyphs]),
'x_min': min([renderer.x_min for renderer in self.comp_glyphs]),
'y_min': min([renderer.y_min for renderer in self.comp_glyphs])
}
def add_glyph(self, group, glyph):
"""Add a composite glyph.
Manages the legend, since the builder might not want all attribute types
used for the legend.
Args:
group (:class:`DataGroup`): the data the `glyph` is associated with
glyph (:class:`CompositeGlyph`): the glyph associated with the `group`
Returns:
None
"""
if isinstance(glyph, list):
for sub_glyph in glyph:
self.comp_glyphs.append(sub_glyph)
else:
self.comp_glyphs.append(glyph)
# handle cases where builders have specified which attributes to use for labels
label = None
if len(self.label_attributes) > 0:
for attr in self.label_attributes:
# this will get the last attribute group label for now
if self.attributes[attr].columns is not None:
label = self._get_group_label(group, attr=attr)
# if no special case for labeling, just use the group label
if label is None:
label = self._get_group_label(group, attr='label')
# add to legend if new and unique label
if str(label) not in self.labels and label is not None:
self._legends.append((label, glyph.renderers))
self.labels.append(label)
def _get_group_label(self, group, attr='label'):
"""Get the label of the group by the attribute name.
Args:
            group (:class:`DataGroup`): the group of data
attr (str, optional): the attribute name containing the label, defaults to
'label'.
Returns:
str: the label for the group
"""
        if attr == 'label':
label = group.label
else:
label = group[attr]
if isinstance(label, dict):
label = tuple(label.values())
return self._get_label(label)
@staticmethod
def _get_label(raw_label):
"""Converts a label by string or tuple to a string representation.
Args:
raw_label (str or tuple(any, any)): a unique identifier for the data group
Returns:
str: a label that is usable in charts
"""
# don't convert None type to string so we can test for it later
if raw_label is None:
return None
if (isinstance(raw_label, tuple) or isinstance(raw_label, list)) and \
len(raw_label) == 1:
raw_label = raw_label[0]
elif isinstance(raw_label, dict):
raw_label = label_from_index_dict(raw_label)
return str(raw_label)
def collect_attr_kwargs(self):
if hasattr(super(self.__class__, self), 'default_attributes'):
attrs = set(self.default_attributes.keys()) - set(
(super(self.__class__, self).default_attributes or {}).keys())
else:
attrs = set()
return attrs
def get_group_kwargs(self, group, attrs):
return {attr: group[attr] for attr in attrs}
def create(self, chart=None):
"""Builds the renderers, adding them and other components to the chart.
Args:
chart (:class:`Chart`, optional): the chart that will contain the glyph
renderers that the `Builder` produces.
Returns:
:class:`Chart`
"""
# call methods that allow customized setup by subclasses
self.setup()
self.process_data()
# create and add renderers to chart
renderers = self.yield_renderers()
if chart is None:
chart = Chart()
chart.add_renderers(self, renderers)
# handle ranges after renders, since ranges depend on aggregations
# ToDo: should reconsider where this occurs
self.set_ranges()
chart.add_ranges('x', self.x_range)
chart.add_ranges('y', self.y_range)
# always contribute legends, let Chart sort it out
chart.add_legend(self._legends)
chart.add_labels('x', self.xlabel)
chart.add_labels('y', self.ylabel)
chart.add_scales('x', self.xscale)
chart.add_scales('y', self.yscale)
if self.tooltips is not None:
tooltips = build_hover_tooltips(hover_spec=self.tooltips,
chart_cols=self.attribute_columns)
chart.add_tooltips(tooltips)
return chart
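    # Standalone usage sketch (illustrative; normally ``create_and_build`` above
    # drives this for you, and ``SomeBuilder`` and the column names are
    # hypothetical):
    #
    #   builder = SomeBuilder(df, x='mpg', y='hp')
    #   chart = builder.create()      # returns a Chart populated with renderers,
    #                                 # ranges, labels, scales and legends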
@classmethod
def generate_help(cls):
help_str = ''
for comp_glyph in cls.comp_glyph_types:
help_str += str(comp_glyph.glyph_properties())
return help_str
class XYBuilder(Builder):
"""Implements common functionality for XY Builders."""
x = Dimension('x')
y = Dimension('y')
dimensions = ['x', 'y']
req_dimensions = [['x'],
['y'],
['x', 'y']]
default_attributes = {'color': ColorAttr()}
def set_ranges(self):
"""Calculate and set the x and y ranges."""
# ToDo: handle when only single dimension is provided
extents = self.get_dim_extents()
endx = extents['x_max']
startx = extents['x_min']
self.x_range = self._get_range('x', startx, endx)
endy = extents['y_max']
starty = extents['y_min']
self.y_range = self._get_range('y', starty, endy)
if self.xlabel is None:
if self.x.selection is not None:
select = self.x.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.xlabel = ', '.join(select)
if self.ylabel is None:
if self.y.selection is not None:
select = self.y.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.ylabel = ', '.join(select)
# sort the legend if we are told to
if len(self.sort_legend) > 0:
for attr, asc in self.sort_legend:
if len(self.attributes[attr].columns) > 0:
item_order = self.attributes[attr].items
self._legends = list(sorted(self._legends, key=lambda leg:
item_order.index(leg[0]),
reverse=not asc))
def _get_range(self, dim, start, end):
"""Create a :class:`Range` for the :class:`Chart`.
Args:
dim (str): the name of the dimension, which is an attribute of the builder
start: the starting value of the range
end: the ending value of the range
Returns:
:class:`Range`
"""
dim_ref = getattr(self, dim)
values = dim_ref.data
dtype = dim_ref.dtype.name
sort = self.sort_dim.get(dim)
# object data or single value
if dtype == 'object':
factors = values.drop_duplicates()
if sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
factors.sort_values(inplace=True)
except AttributeError:
factors.sort(inplace=True)
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=factors.tolist())
elif 'datetime' in dtype:
setattr(self, dim + 'scale', 'datetime')
return Range1d(start=start, end=end)
else:
if end == 'None' or (end - start) == 0:
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=['None'])
else:
diff = end - start
setattr(self, dim + 'scale', 'linear')
return Range1d(start=start - 0.1 * diff, end=end + 0.1 * diff)
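# Range selection sketch: object dtype -> FactorRange over the unique values
# (optionally sorted), datetime dtype -> Range1d(start, end), numeric ->
# Range1d padded by 10% of the span, e.g. start=0, end=10 yields
# Range1d(start=-1.0, end=11.0).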
class AggregateBuilder(Builder):
"""A base class for deriving specific builders performing aggregation with stats.
The typical AggregateBuilder takes a single dimension of values.
"""
values = Dimension('values')
default_attributes = {'label': CatAttr(),
'color': ColorAttr()}
| bsd-3-clause |
sshh12/StockMarketML | lab1/HeadlinePredictionClassification.py | 1 | 3059 |
# coding: utf-8
# In[1]:
# Setup (Imports)
from LoadData import *
from keras.models import Sequential
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.layers import Dense, LSTM, Dropout, Flatten, Conv1D, BatchNormalization, Activation, GlobalMaxPooling1D
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
# Setup (Globals/Hyperz)
window_size = 4
epochs = 750
batch_size = 128
emb_size = 100
# In[3]:
# Loading and Splitting Data
def get_data(stock):
AllX, AllY = create_timeframed_doc2vec_classification_data(stock, window_size, min_time_disparity=4)
trainX, trainY, testX, testY = split_data(AllX, AllY, ratio=.85)
return (trainX, trainY), (testX, testY)
# In[4]:
# Make Model
def get_model():
model = Sequential()
model.add(Conv1D(filters=8, kernel_size=3, padding='same', input_shape=(window_size, emb_size), name="hl_conv1d"))
model.add(GlobalMaxPooling1D(name="hl_gpool"))
#model.add(Dense(30, name="hl_d1"))
#model.add(BatchNormalization(name="hl_bn1"))
#model.add(Activation('selu', name="hl_a1"))
#model.add(Dropout(0.5, name="hl_do1"))
model.add(Dense(10, name="hl_d2"))
model.add(BatchNormalization(name="hl_bn2"))
model.add(Activation('selu', name="hl_a2"))
model.add(Dropout(0.2, name="hl_do2"))
model.add(Dense(8, name="hl_d3"))
model.add(BatchNormalization(name="hl_bn3"))
model.add(Activation('selu', name="hl_a3"))
model.add(Dropout(0.1, name="hl_do3"))
model.add(Dense(2, activation='softmax', name="hl_d4"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
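# Quick sanity check (illustrative only): the network expects inputs of shape
# (batch, window_size, emb_size) = (None, 4, 100) and outputs 2-way softmax
# probabilities.
#   m = get_model()
#   m.summary()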
# In[5]:
# Run (Load)
if __name__ == "__main__":
(trainX, trainY), (testX, testY) = get_data('AAPL')
print(trainX.shape, trainY.shape)
# In[6]:
# Run (Train)
if __name__ == "__main__":
model = get_model()
reduce_LR = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=30, min_lr=1e-6, verbose=0)
e_stopping = EarlyStopping(patience=60)
checkpoint = ModelCheckpoint(os.path.join('models', 'headline-classification.h5'),
monitor='val_loss',
verbose=0,
save_best_only=True)
history = model.fit(trainX, trainY, epochs=epochs,
batch_size=batch_size,
validation_data=(testX, testY),
verbose=0,
callbacks=[checkpoint, e_stopping])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['TrainLoss', 'TestLoss'])
plt.show()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['TrainAcc', 'TestAcc'])
plt.show()
| mit |
openego/eDisGo | edisgo/data/import_data.py | 1 | 89664 | from ..grid.components import Load, Generator, BranchTee, MVStation, Line, \
Transformer, LVStation, GeneratorFluctuating
from ..grid.grids import MVGrid, LVGrid
from ..grid.connect import connect_mv_generators, connect_lv_generators
from ..grid.tools import select_cable, position_switch_disconnectors
from ..tools.geo import proj2equidistant
from edisgo.tools import pypsa_io
from edisgo.tools import session_scope
from egoio.db_tables import model_draft, supply
from sqlalchemy import func
from workalendar.europe import Germany
from demandlib import bdew as bdew, particular_profiles as profiles
import datetime
import pandas as pd
import numpy as np
import networkx as nx
from math import isnan
import random
import os
if not 'READTHEDOCS' in os.environ:
from ding0.tools.results import load_nd_from_pickle
from ding0.core.network.stations import LVStationDing0
from ding0.core.structure.regions import LVLoadAreaCentreDing0
from ding0.core import GeneratorFluctuatingDing0
from shapely.ops import transform
from shapely.wkt import loads as wkt_loads
import logging
logger = logging.getLogger('edisgo')
def import_from_ding0(file, network):
"""
Import an eDisGo grid topology from
`Ding0 data <https://github.com/openego/ding0>`_.
This import method is specifically designed to load grid topology data in
the format that `Ding0 <https://github.com/openego/ding0>`_ provides via
pickles.
The import of the grid topology includes
* the topology itself
* equipment parameter
* generators incl. location, type, subtype and capacity
* loads incl. location and sectoral consumption
Parameters
----------
file: :obj:`str` or :class:`ding0.core.NetworkDing0`
If a str is provided it is assumed it points to a pickle with Ding0
grid data. This file will be read.
If an object of type :class:`ding0.core.NetworkDing0` is provided, data
will be used directly from this object.
network: :class:`~.grid.network.Network`
The eDisGo data container object
Notes
-----
Assumes :class:`ding0.core.NetworkDing0` provided by `file` contains
only data of one mv_grid_district.
"""
# when `file` is a string, it will be read by the help of pickle
if isinstance(file, str):
ding0_nd = load_nd_from_pickle(filename=file)
# otherwise it is assumed the object is passed directly
else:
ding0_nd = file
ding0_mv_grid = ding0_nd._mv_grid_districts[0].mv_grid
# Make sure circuit breakers (respectively the rings) are closed
ding0_mv_grid.close_circuit_breakers()
# Import medium-voltage grid data
network.mv_grid = _build_mv_grid(ding0_mv_grid, network)
# Import low-voltage grid data
lv_grids, lv_station_mapping, lv_grid_mapping = _build_lv_grid(
ding0_mv_grid, network)
# Assign lv_grids to network
network.mv_grid.lv_grids = lv_grids
# Integrate disconnecting points
position_switch_disconnectors(network.mv_grid,
mode=network.config['disconnecting_point'][
'position'])
# Check data integrity
_validate_ding0_grid_import(network.mv_grid, ding0_mv_grid,
lv_grid_mapping)
# Set data source
network.set_data_source('grid', 'dingo')
# Set more params
network._id = network.mv_grid.id
# Update the weather_cell_ids in mv_grid to include the ones in lv_grids
# ToDo: maybe get a better solution to push the weather_cell_ids in lv_grids but not in mv_grid but into the
# mv_grid.weather_cell_ids from within the Grid() object or the MVGrid() or LVGrid()
mv_weather_cell_id = network.mv_grid.weather_cells
for lvg in lv_grids:
if lvg.weather_cells:
for lv_w_id in lvg._weather_cells:
if not (lv_w_id in mv_weather_cell_id):
network.mv_grid._weather_cells.append(lv_w_id)
def _build_lv_grid(ding0_grid, network):
"""
Build eDisGo LV grid from Ding0 data
Parameters
----------
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
Returns
-------
list of LVGrid
LV grids
dict
Dictionary containing a mapping of LV stations in Ding0 to newly
created eDisGo LV stations. This mapping is used to use the same
instances of LV stations in the MV grid graph.
"""
lv_station_mapping = {}
lv_grids = []
lv_grid_mapping = {}
for la in ding0_grid.grid_district._lv_load_areas:
for lvgd in la._lv_grid_districts:
ding0_lv_grid = lvgd.lv_grid
if not ding0_lv_grid.grid_district.lv_load_area.is_aggregated:
# Create LV grid instance
lv_grid = LVGrid(
id=ding0_lv_grid.id_db,
geom=ding0_lv_grid.grid_district.geo_data,
grid_district={
'geom': ding0_lv_grid.grid_district.geo_data,
'population': ding0_lv_grid.grid_district.population},
voltage_nom=ding0_lv_grid.v_level / 1e3,
network=network)
station = {repr(_): _ for _ in
network.mv_grid.graph.nodes_by_attribute(
'lv_station')}['LVStation_' + str(
ding0_lv_grid._station.id_db)]
station.grid = lv_grid
for t in station.transformers:
t.grid = lv_grid
lv_grid.graph.add_node(station, type='lv_station')
lv_station_mapping.update({ding0_lv_grid._station: station})
# Create list of load instances and add these to grid's graph
loads = {_: Load(
id=_.id_db,
geom=_.geo_data,
grid=lv_grid,
consumption=_.consumption) for _ in ding0_lv_grid.loads()}
lv_grid.graph.add_nodes_from(loads.values(), type='load')
# Create list of generator instances and add these to grid's
# graph
generators = {_: (GeneratorFluctuating(
id=_.id_db,
geom=_.geo_data,
nominal_capacity=_.capacity,
type=_.type,
subtype=_.subtype,
grid=lv_grid,
weather_cell_id=_.weather_cell_id,
v_level=_.v_level) if _.type in ['wind', 'solar'] else
Generator(
id=_.id_db,
geom=_.geo_data,
nominal_capacity=_.capacity,
type=_.type,
subtype=_.subtype,
grid=lv_grid,
v_level=_.v_level))
for _ in ding0_lv_grid.generators()}
lv_grid.graph.add_nodes_from(generators.values(),
type='generator')
# Create list of branch tee instances and add these to grid's
# graph
branch_tees = {
_: BranchTee(id=_.id_db,
geom=_.geo_data,
grid=lv_grid,
in_building=_.in_building)
for _ in ding0_lv_grid._cable_distributors}
lv_grid.graph.add_nodes_from(branch_tees.values(),
type='branch_tee')
# Merge node above defined above to a single dict
nodes = {**loads,
**generators,
**branch_tees,
**{ding0_lv_grid._station: station}}
edges = []
edges_raw = list(nx.get_edge_attributes(
ding0_lv_grid._graph, name='branch').items())
for edge in edges_raw:
edges.append({'adj_nodes': edge[0], 'branch': edge[1]})
# Create list of line instances and add these to grid's graph
lines = [(nodes[_['adj_nodes'][0]], nodes[_['adj_nodes'][1]],
{'line': Line(
id=_['branch'].id_db,
type=_['branch'].type,
length=_['branch'].length / 1e3,
kind=_['branch'].kind,
grid=lv_grid)
})
for _ in edges]
# convert voltage from V to kV
for line in lines:
# ToDo: remove work around once it's fixed in ding0
if line[2]['line'].type['U_n'] >= 400:
line[2]['line'].type['U_n'] = \
line[2]['line'].type['U_n'] / 1e3
lv_grid.graph.add_edges_from(lines, type='line')
# Add LV station as association to LV grid
lv_grid._station = station
# Add to lv grid mapping
lv_grid_mapping.update({lv_grid: ding0_lv_grid})
# Put all LV grid to a list of LV grids
lv_grids.append(lv_grid)
# ToDo: don't forget to adapt lv stations creation in MV grid
return lv_grids, lv_station_mapping, lv_grid_mapping
def _build_mv_grid(ding0_grid, network):
"""
Parameters
----------
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
network: Network
The eDisGo container object
Returns
-------
MVGrid
A MV grid of class edisgo.grids.MVGrid is returned. Data from the Ding0
MV Grid object is translated to the new grid object.
"""
# Instantiate a MV grid
grid = MVGrid(
id=ding0_grid.id_db,
network=network,
grid_district={'geom': ding0_grid.grid_district.geo_data,
'population':
sum([_.zensus_sum
for _ in
ding0_grid.grid_district._lv_load_areas
if not np.isnan(_.zensus_sum)])},
voltage_nom=ding0_grid.v_level)
# Special treatment of LVLoadAreaCenters see ...
# ToDo: add a reference above for explanation of how these are treated
la_centers = [_ for _ in ding0_grid._graph.nodes()
if isinstance(_, LVLoadAreaCentreDing0)]
if la_centers:
aggregated, aggr_stations, dingo_import_data = \
_determine_aggregated_nodes(la_centers)
network.dingo_import_data = dingo_import_data
else:
aggregated = {}
aggr_stations = []
# create empty DF for imported agg. generators
network.dingo_import_data = pd.DataFrame(columns=('id',
'capacity',
'agg_geno')
)
# Create list of load instances and add these to grid's graph
loads = {_: Load(
id=_.id_db,
geom=_.geo_data,
grid=grid,
consumption=_.consumption) for _ in ding0_grid.loads()}
grid.graph.add_nodes_from(loads.values(), type='load')
# Create list of generator instances and add these to grid's graph
generators = {_: (GeneratorFluctuating(
id=_.id_db,
geom=_.geo_data,
nominal_capacity=_.capacity,
type=_.type,
subtype=_.subtype,
grid=grid,
weather_cell_id=_.weather_cell_id,
v_level=_.v_level) if _.type in ['wind', 'solar'] else
Generator(
id=_.id_db,
geom=_.geo_data,
nominal_capacity=_.capacity,
type=_.type,
subtype=_.subtype,
grid=grid,
v_level=_.v_level))
for _ in ding0_grid.generators()}
grid.graph.add_nodes_from(generators.values(), type='generator')
# Create list of branch tee instances and add these to grid's graph
branch_tees = {_: BranchTee(id=_.id_db,
geom=_.geo_data,
grid=grid,
in_building=False)
for _ in ding0_grid._cable_distributors}
grid.graph.add_nodes_from(branch_tees.values(), type='branch_tee')
# Create list of LV station instances and add these to grid's graph
stations = {_: LVStation(id=_.id_db,
geom=_.geo_data,
mv_grid=grid,
grid=None, # (this will be set during LV import)
transformers=[Transformer(
mv_grid=grid,
grid=None, # (this will be set during LV import)
id='_'.join(['LVStation',
str(_.id_db),
'transformer',
str(count)]),
geom=_.geo_data,
voltage_op=t.v_level,
type=pd.Series(dict(
S_nom=t.s_max_a, x_pu=t.x_pu, r_pu=t.r_pu))
) for (count, t) in enumerate(_.transformers(), 1)])
for _ in ding0_grid._graph.nodes()
if isinstance(_, LVStationDing0) and _ not in aggr_stations}
grid.graph.add_nodes_from(stations.values(), type='lv_station')
# Create HV-MV station add to graph
mv_station = MVStation(
id=ding0_grid.station().id_db,
geom=ding0_grid.station().geo_data,
grid=grid,
transformers=[Transformer(
mv_grid=grid,
grid=grid,
id='_'.join(['MVStation',
str(ding0_grid.station().id_db),
'transformer',
str(count)]),
geom=ding0_grid.station().geo_data,
voltage_op=_.v_level,
type=pd.Series(dict(
S_nom=_.s_max_a, x_pu=_.x_pu, r_pu=_.r_pu)))
for (count, _) in enumerate(
ding0_grid.station().transformers(), 1)])
grid.graph.add_node(mv_station, type='mv_station')
# Merge node above defined above to a single dict
nodes = {**loads,
**generators,
**branch_tees,
**stations,
**{ding0_grid.station(): mv_station}}
# Create list of line instances and add these to grid's graph
lines = [(nodes[_['adj_nodes'][0]], nodes[_['adj_nodes'][1]],
{'line': Line(
id=_['branch'].id_db,
type=_['branch'].type,
kind=_['branch'].kind,
length=_['branch'].length / 1e3,
grid=grid)
})
for _ in ding0_grid.graph_edges()
if not any([isinstance(_['adj_nodes'][0], LVLoadAreaCentreDing0),
isinstance(_['adj_nodes'][1], LVLoadAreaCentreDing0)])]
# set line name as series name
for line in lines:
line[2]['line'].type.name = line[2]['line'].type['name']
grid.graph.add_edges_from(lines, type='line')
# Assign reference to HV-MV station to MV grid
grid._station = mv_station
# Attach aggregated to MV station
_attach_aggregated(network, grid, aggregated, ding0_grid)
return grid
def _determine_aggregated_nodes(la_centers):
"""Determine generation and load within load areas
Parameters
----------
la_centers: list of LVLoadAreaCentre
Load Area Centers are Ding0 implementations for representing areas of
high population density with high demand compared to DG potential.
Notes
-----
Currently, MV grid loads are not considered in this aggregation function as
Ding0 data does not come with loads in the MV grid level.
Returns
-------
:obj:`list` of dict
aggregated
Dict of the structure
.. code:
{'generation': {
'v_level': {
'subtype': {
'ids': <ids of aggregated generator>,
'capacity'}
}
},
'load': {
'consumption':
'residential': <value>,
'retail': <value>,
...
}
'aggregates': {
'population': int,
'geom': `shapely.Polygon`
}
}
:obj:`list`
aggr_stations
List of LV stations its generation and load is aggregated
"""
def aggregate_generators(gen, aggr):
"""Aggregate generation capacity per voltage level
Parameters
----------
gen: ding0.core.GeneratorDing0
Ding0 Generator object
aggr: dict
Aggregated generation capacity. For structure see
`_determine_aggregated_nodes()`.
Returns
-------
"""
if gen.v_level not in aggr['generation']:
aggr['generation'][gen.v_level] = {}
if gen.type not in aggr['generation'][gen.v_level]:
aggr['generation'][gen.v_level][gen.type] = {}
if gen.subtype not in aggr['generation'][gen.v_level][gen.type]:
aggr['generation'][gen.v_level][gen.type].update(
{gen.subtype: {'ids': [gen.id_db],
'capacity': gen.capacity}})
else:
aggr['generation'][gen.v_level][gen.type][gen.subtype][
'ids'].append(gen.id_db)
aggr['generation'][gen.v_level][gen.type][gen.subtype][
'capacity'] += gen.capacity
return aggr
def aggregate_loads(la_center, aggr):
"""Aggregate consumption in load area per sector
Parameters
----------
la_center: LVLoadAreaCentreDing0
Load area center object from Ding0
Returns
-------
"""
for s in ['retail', 'industrial', 'agricultural', 'residential']:
if s not in aggr['load']:
aggr['load'][s] = 0
aggr['load']['retail'] += sum(
[_.sector_consumption_retail
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['industrial'] += sum(
[_.sector_consumption_industrial
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['agricultural'] += sum(
[_.sector_consumption_agricultural
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['residential'] += sum(
[_.sector_consumption_residential
for _ in la_center.lv_load_area._lv_grid_districts])
return aggr
aggregated = {}
aggr_stations = []
# ToDo: The variable generation_aggr is further used -> delete this code
generation_aggr = {}
for la in la_centers[0].grid.grid_district._lv_load_areas:
for lvgd in la._lv_grid_districts:
for gen in lvgd.lv_grid.generators():
if la.is_aggregated:
generation_aggr.setdefault(gen.type, {})
generation_aggr[gen.type].setdefault(gen.subtype, {'ding0': 0})
generation_aggr[gen.type][gen.subtype].setdefault('ding0', 0)
generation_aggr[gen.type][gen.subtype]['ding0'] += gen.capacity
dingo_import_data = pd.DataFrame(columns=('id',
'capacity',
'agg_geno')
)
for la_center in la_centers:
aggr = {'generation': {}, 'load': {}, 'aggregates': []}
# Determine aggregated generation in LV grid
for lvgd in la_center.lv_load_area._lv_grid_districts:
weather_cell_ids = {}
for gen in lvgd.lv_grid.generators():
aggr = aggregate_generators(gen, aggr)
# Get the aggregated weather cell id of the area
if isinstance(gen, GeneratorFluctuatingDing0):
if gen.weather_cell_id not in weather_cell_ids.keys():
weather_cell_ids[gen.weather_cell_id] = 1
else:
weather_cell_ids[gen.weather_cell_id] += 1
dingo_import_data.loc[len(dingo_import_data)] = \
[int(gen.id_db),
gen.capacity,
None]
# Get the weather cell id that occurs the most if there are any generators
if not(list(lvgd.lv_grid.generators())):
weather_cell_id = None
else:
if weather_cell_ids:
weather_cell_id = list(weather_cell_ids.keys())[
list(weather_cell_ids.values()).index(
max(weather_cell_ids.values()))]
else:
weather_cell_id = None
for v_level in aggr['generation']:
for type in aggr['generation'][v_level]:
for subtype in aggr['generation'][v_level][type]:
# make sure to check if there are any generators before assigning
# a weather cell id
if not(list(lvgd.lv_grid.generators())):
pass
else:
aggr['generation'][v_level][type][subtype]['weather_cell_id'] = \
weather_cell_id
# Determine aggregated load in MV grid
# -> Implement once loads in Ding0 MV grids exist
# Determine aggregated load in LV grid
aggr = aggregate_loads(la_center, aggr)
# Collect metadata of aggregated load areas
aggr['aggregates'] = {
'population': la_center.lv_load_area.zensus_sum,
'geom': la_center.lv_load_area.geo_area}
# Determine LV grids/ stations that are aggregated
for _ in la_center.lv_load_area._lv_grid_districts:
aggr_stations.append(_.lv_grid.station())
# add elements to lists
aggregated.update({la_center.id_db: aggr})
return aggregated, aggr_stations, dingo_import_data
def _attach_aggregated(network, grid, aggregated, ding0_grid):
"""Add Generators and Loads to MV station representing aggregated generation
capacity and load
Parameters
----------
grid: MVGrid
MV grid object
aggregated: dict
Information about aggregated load and generation capacity. For
information about the structure of the dict see ... .
ding0_grid: ding0.Network
Ding0 network container
Returns
-------
MVGrid
Altered instance of MV grid including aggregated load and generation
"""
aggr_line_type = ding0_grid.network._static_data['MV_cables'].iloc[
ding0_grid.network._static_data['MV_cables']['I_max_th'].idxmax()]
for la_id, la in aggregated.items():
# add aggregated generators
for v_level, val in la['generation'].items():
for type, val2 in val.items():
for subtype, val3 in val2.items():
if type in ['solar', 'wind']:
gen = GeneratorFluctuating(
id='agg-' + str(la_id) + '-' + '_'.join(
[str(_) for _ in val3['ids']]),
nominal_capacity=val3['capacity'],
weather_cell_id=val3['weather_cell_id'],
type=type,
subtype=subtype,
geom=grid.station.geom,
grid=grid,
v_level=4)
else:
gen = Generator(
id='agg-' + str(la_id) + '-' + '_'.join(
[str(_) for _ in val3['ids']]),
nominal_capacity=val3['capacity'],
type=type,
subtype=subtype,
geom=grid.station.geom,
grid=grid,
v_level=4)
grid.graph.add_node(gen, type='generator_aggr')
# backup reference of geno to LV geno list (save geno
# where the former LV genos are aggregated in)
network.dingo_import_data.set_value(network.dingo_import_data['id'].isin(val3['ids']),
'agg_geno',
gen)
# connect generator to MV station
line = Line(id='line_aggr_generator_la_' + str(la_id) + '_vlevel_{v_level}_'
'{subtype}'.format(
v_level=v_level,
subtype=subtype),
type=aggr_line_type,
kind='cable',
length=1e-3,
grid=grid)
grid.graph.add_edge(grid.station,
gen,
line=line,
type='line_aggr')
for sector, sectoral_load in la['load'].items():
load = Load(
geom=grid.station.geom,
consumption={sector: sectoral_load},
grid=grid,
id='_'.join(['Load_aggregated', sector, repr(grid), str(la_id)]))
grid.graph.add_node(load, type='load')
# connect aggregated load to MV station
line = Line(id='_'.join(['line_aggr_load_la_' + str(la_id), sector, str(la_id)]),
type=aggr_line_type,
kind='cable',
length=1e-3,
grid=grid)
grid.graph.add_edge(grid.station,
load,
line=line,
type='line_aggr')
def _validate_ding0_grid_import(mv_grid, ding0_mv_grid, lv_grid_mapping):
"""Cross-check imported data with original data source
Parameters
----------
mv_grid: MVGrid
eDisGo MV grid instance
ding0_mv_grid: MVGridDing0
Ding0 MV grid instance
lv_grid_mapping: dict
Translates Ding0 LV grids to associated, newly created eDisGo LV grids
"""
# Check number of components in MV grid
_validate_ding0_mv_grid_import(mv_grid, ding0_mv_grid)
# Check number of components in LV grid
_validate_ding0_lv_grid_import(mv_grid.lv_grids, ding0_mv_grid,
lv_grid_mapping)
# Check cumulative load and generation in MV grid district
_validate_load_generation(mv_grid, ding0_mv_grid)
def _validate_ding0_mv_grid_import(grid, ding0_grid):
"""Verify imported data with original data from Ding0
Parameters
----------
grid: MVGrid
MV Grid data (eDisGo)
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
Notes
-----
The data validation excludes grid components located in aggregated load
areas as these are represented differently in eDisGo.
Returns
-------
dict
Dict showing data integrity for each type of grid component
"""
integrity_checks = ['branch_tee',
'disconnection_point', 'mv_transformer',
'lv_station'#,'line',
]
data_integrity = {}
data_integrity.update({_: {'ding0': None, 'edisgo': None, 'msg': None}
for _ in integrity_checks})
# Check number of branch tees
data_integrity['branch_tee']['ding0'] = len(ding0_grid._cable_distributors)
data_integrity['branch_tee']['edisgo'] = len(
grid.graph.nodes_by_attribute('branch_tee'))
# Check number of disconnecting points
data_integrity['disconnection_point']['ding0'] = len(
ding0_grid._circuit_breakers)
data_integrity['disconnection_point']['edisgo'] = len(
grid.graph.nodes_by_attribute('mv_disconnecting_point'))
# Check number of MV transformers
data_integrity['mv_transformer']['ding0'] = len(
list(ding0_grid.station().transformers()))
data_integrity['mv_transformer']['edisgo'] = len(
grid.station.transformers)
# Check number of LV stations in MV grid (graph)
data_integrity['lv_station']['edisgo'] = len(grid.graph.nodes_by_attribute(
'lv_station'))
data_integrity['lv_station']['ding0'] = len(
[_ for _ in ding0_grid._graph.nodes()
if (isinstance(_, LVStationDing0) and
not _.grid.grid_district.lv_load_area.is_aggregated)])
# Check number of lines outside aggregated LA
# edges_w_la = grid.graph.lines()
# data_integrity['line']['edisgo'] = len([_ for _ in edges_w_la
# if not (_['adj_nodes'][0] == grid.station or
# _['adj_nodes'][1] == grid.station) and
# _['line']._length > .5])
# data_integrity['line']['ding0'] = len(
# [_ for _ in ding0_grid.lines()
# if not _['branch'].connects_aggregated])
# raise an error if data does not match
for c in integrity_checks:
if data_integrity[c]['edisgo'] != data_integrity[c]['ding0']:
raise ValueError(
'Unequal number of objects for {c}. '
'\n\tDing0:\t{ding0_no}'
'\n\teDisGo:\t{edisgo_no}'.format(
c=c,
ding0_no=data_integrity[c]['ding0'],
edisgo_no=data_integrity[c]['edisgo']))
return data_integrity
def _validate_ding0_lv_grid_import(grids, ding0_grid, lv_grid_mapping):
"""Verify imported data with original data from Ding0
Parameters
----------
grids: list of LVGrid
LV Grid data (eDisGo)
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
lv_grid_mapping: dict
Defines relationship between Ding0 and eDisGo grid objects
Notes
-----
The data validation excludes grid components located in aggregated load
areas as these are represented differently in eDisGo.
Returns
-------
dict
Dict showing data integrity for each type of grid component
"""
integrity_checks = ['branch_tee', 'lv_transformer',
'generator', 'load','line']
data_integrity = {}
for grid in grids:
data_integrity.update({grid:{_: {'ding0': None, 'edisgo': None, 'msg': None}
for _ in integrity_checks}})
# Check number of branch tees
data_integrity[grid]['branch_tee']['ding0'] = len(
lv_grid_mapping[grid]._cable_distributors)
data_integrity[grid]['branch_tee']['edisgo'] = len(
grid.graph.nodes_by_attribute('branch_tee'))
# Check number of LV transformers
data_integrity[grid]['lv_transformer']['ding0'] = len(
list(lv_grid_mapping[grid].station().transformers()))
data_integrity[grid]['lv_transformer']['edisgo'] = len(
grid.station.transformers)
# Check number of generators
data_integrity[grid]['generator']['edisgo'] = len(
grid.generators)
data_integrity[grid]['generator']['ding0'] = len(
list(lv_grid_mapping[grid].generators()))
# Check number of loads
data_integrity[grid]['load']['edisgo'] = len(
grid.graph.nodes_by_attribute('load'))
data_integrity[grid]['load']['ding0'] = len(
list(lv_grid_mapping[grid].loads()))
# Check number of lines outside aggregated LA
data_integrity[grid]['line']['edisgo'] = len(
list(grid.graph.lines()))
data_integrity[grid]['line']['ding0'] = len(
[_ for _ in lv_grid_mapping[grid].graph_edges()
if not _['branch'].connects_aggregated])
# raise an error if data does not match
for grid in grids:
for c in integrity_checks:
if data_integrity[grid][c]['edisgo'] != data_integrity[grid][c]['ding0']:
raise ValueError(
'Unequal number of objects in grid {grid} for {c}. '
'\n\tDing0:\t{ding0_no}'
'\n\teDisGo:\t{edisgo_no}'.format(
grid=grid,
c=c,
ding0_no=data_integrity[grid][c]['ding0'],
edisgo_no=data_integrity[grid][c]['edisgo']))
def _validate_load_generation(mv_grid, ding0_mv_grid):
"""
Parameters
----------
mv_grid
ding0_mv_grid
Notes
-----
Only loads in LV grids are compared as currently Ding0 does not have MV
connected loads
"""
decimal_places = 6
tol = 10 ** -decimal_places
sectors = ['retail', 'industrial', 'agricultural', 'residential']
consumption = {_: {'edisgo': 0, 'ding0':0} for _ in sectors}
# Collect eDisGo LV loads
for lv_grid in mv_grid.lv_grids:
for load in lv_grid.graph.nodes_by_attribute('load'):
for s in sectors:
consumption[s]['edisgo'] += load.consumption.get(s, 0)
# Collect Ding0 LV loads
for la in ding0_mv_grid.grid_district._lv_load_areas:
for lvgd in la._lv_grid_districts:
for load in lvgd.lv_grid.loads():
for s in sectors:
consumption[s]['ding0'] += load.consumption.get(s, 0)
# Compare cumulative load
for k, v in consumption.items():
if v['edisgo'] != v['ding0']:
raise ValueError(
'Consumption for {sector} does not match! '
'\n\tDing0:\t{ding0}'
'\n\teDisGo:\t{edisgo}'.format(
sector=k,
ding0=v['ding0'],
edisgo=v['edisgo']))
# Compare cumulative generation capacity
mv_gens = mv_grid.graph.nodes_by_attribute('generator')
lv_gens = []
[lv_gens.extend(_.graph.nodes_by_attribute('generator'))
for _ in mv_grid.lv_grids]
gens_aggr = mv_grid.graph.nodes_by_attribute('generator_aggr')
generation = {}
generation_aggr = {}
# collect eDisGo cumulative generation capacity
for gen in mv_gens + lv_gens:
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'edisgo': 0})
generation[gen.type][gen.subtype]['edisgo'] += gen.nominal_capacity
for gen in gens_aggr:
generation_aggr.setdefault(gen.type, {})
generation_aggr[gen.type].setdefault(gen.subtype, {'edisgo': 0})
generation_aggr[gen.type][gen.subtype]['edisgo'] += gen.nominal_capacity
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'edisgo': 0})
generation[gen.type][gen.subtype]['edisgo'] += gen.nominal_capacity
# collect Ding0 MV generation capacity
for gen in ding0_mv_grid.generators():
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'ding0': 0})
generation[gen.type][gen.subtype].setdefault('ding0', 0)
generation[gen.type][gen.subtype]['ding0'] += gen.capacity
# Collect Ding0 LV generation capacity
for la in ding0_mv_grid.grid_district._lv_load_areas:
for lvgd in la._lv_grid_districts:
for gen in lvgd.lv_grid.generators():
if la.is_aggregated:
generation_aggr.setdefault(gen.type, {})
generation_aggr[gen.type].setdefault(gen.subtype, {'ding0': 0})
generation_aggr[gen.type][gen.subtype].setdefault('ding0', 0)
generation_aggr[gen.type][gen.subtype]['ding0'] += gen.capacity
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {'ding0': 0})
generation[gen.type][gen.subtype].setdefault('ding0', 0)
generation[gen.type][gen.subtype]['ding0'] += gen.capacity
# Compare cumulative generation capacity
for k1, v1 in generation.items():
for k2, v2 in v1.items():
if abs(v2['edisgo'] - v2['ding0']) > tol:
raise ValueError(
'Generation capacity of {type} {subtype} does not match! '
'\n\tDing0:\t{ding0}'
'\n\teDisGo:\t{edisgo}'.format(
type=k1,
subtype=k2,
ding0=v2['ding0'],
edisgo=v2['edisgo']))
# Compare aggregated generation capacity
for k1, v1 in generation_aggr.items():
for k2, v2 in v1.items():
if abs(v2['edisgo'] - v2['ding0']) > tol:
raise ValueError(
'Aggregated generation capacity of {type} {subtype} does '
'not match! '
'\n\tDing0:\t{ding0}'
'\n\teDisGo:\t{edisgo}'.format(
type=k1,
subtype=k2,
ding0=v2['ding0'],
edisgo=v2['edisgo']))
def import_generators(network, data_source=None, file=None):
"""Import generator data from source.
The generator data include
* nom. capacity
* type ToDo: specify!
* timeseries
Additional data which can be processed (e.g. used in OEDB data) are
* location
* type
* subtype
* capacity
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
data_source: :obj:`str`
Data source. Supported sources:
* 'oedb'
file: :obj:`str`
File to import data from, required when using file-based sources.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of generators
"""
if data_source == 'oedb':
logging.warning('Right now only solar and wind generators can be '
'imported from the oedb.')
_import_genos_from_oedb(network=network)
network.mv_grid._weather_cells = None
if network.pypsa is not None:
pypsa_io.update_pypsa_generator_import(network)
elif data_source == 'pypsa':
_import_genos_from_pypsa(network=network, file=file)
else:
logger.error("Invalid option {} for generator import. Must either be "
"'oedb' or 'pypsa'.".format(data_source))
raise ValueError('The option you specified is not supported.')
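# Usage sketch (assumes `network` carries a generator scenario and oedb
# access is configured):
#   import_generators(network, data_source='oedb')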
def _import_genos_from_oedb(network):
"""Import generator data from the Open Energy Database (OEDB).
The importer uses SQLAlchemy ORM objects.
These are defined in ego.io,
see https://github.com/openego/ego.io/tree/dev/egoio/db_tables
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Notes
------
Right now only solar and wind generators can be imported.
"""
def _import_conv_generators(session):
"""Import conventional (conv) generators
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of medium-voltage generators
Notes
-----
You can find a full list of columns in
:func:`edisgo.data.import_data._update_grids`
"""
# build query
generators_sqla = session.query(
orm_conv_generators.columns.id,
orm_conv_generators.columns.subst_id,
orm_conv_generators.columns.la_id,
orm_conv_generators.columns.capacity,
orm_conv_generators.columns.type,
orm_conv_generators.columns.voltage_level,
orm_conv_generators.columns.fuel,
func.ST_AsText(func.ST_Transform(
orm_conv_generators.columns.geom, srid))
). \
filter(orm_conv_generators.columns.subst_id == network.mv_grid.id). \
filter(orm_conv_generators.columns.voltage_level.in_([4, 5, 6, 7])). \
filter(orm_conv_generators_version)
# read data from db
generators_mv = pd.read_sql_query(generators_sqla.statement,
session.bind,
index_col='id')
return generators_mv
def _import_res_generators(session):
"""Import renewable (res) generators
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of medium-voltage generators
:pandas:`pandas.DataFrame<dataframe>`
List of low-voltage generators
Notes
-----
You can find a full list of columns in
:func:`edisgo.data.import_data._update_grids`
If subtype is not specified it's set to 'unknown'.
"""
# Create filter for generation technologies
# ToDo: This needs to be removed when all generators can be imported
types_filter = orm_re_generators.columns.generation_type.in_(
['solar', 'wind'])
# build basic query
generators_sqla = session.query(
orm_re_generators.columns.id,
orm_re_generators.columns.subst_id,
orm_re_generators.columns.la_id,
orm_re_generators.columns.mvlv_subst_id,
orm_re_generators.columns.electrical_capacity,
orm_re_generators.columns.generation_type,
orm_re_generators.columns.generation_subtype,
orm_re_generators.columns.voltage_level,
orm_re_generators.columns.w_id,
func.ST_AsText(func.ST_Transform(
orm_re_generators.columns.rea_geom_new, srid)).label('geom'),
func.ST_AsText(func.ST_Transform(
orm_re_generators.columns.geom, srid)).label('geom_em')). \
filter(orm_re_generators.columns.subst_id == network.mv_grid.id). \
filter(orm_re_generators_version). \
filter(types_filter)
# extend basic query for MV generators and read data from db
generators_mv_sqla = generators_sqla. \
filter(orm_re_generators.columns.voltage_level.in_([4, 5]))
generators_mv = pd.read_sql_query(generators_mv_sqla.statement,
session.bind,
index_col='id')
# define generators with unknown subtype as 'unknown'
generators_mv.loc[generators_mv[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
# extend basic query for LV generators and read data from db
generators_lv_sqla = generators_sqla. \
filter(orm_re_generators.columns.voltage_level.in_([6, 7]))
generators_lv = pd.read_sql_query(generators_lv_sqla.statement,
session.bind,
index_col='id')
# define generators with unknown subtype as 'unknown'
generators_lv.loc[generators_lv[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
return generators_mv, generators_lv
def _update_grids(network, generators_mv, generators_lv, remove_missing=True):
"""Update imported status quo DINGO-grid according to new generator dataset
It
* adds new generators to grid if they do not exist
* updates existing generators if parameters have changed
* removes existing generators from grid which do not exist in the imported dataset
Steps:
* Step 1: MV generators: Update existing, create new, remove decommissioned
* Step 2: LV generators (single units): Update existing, remove decommissioned
* Step 3: LV generators (in aggregated MV generators): Update existing,
remove decommissioned
(aggregated MV generators = originally LV generators from aggregated Load
Areas which were aggregated during import from ding0.)
* Step 4: LV generators (single units + aggregated MV generators): Create new
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
generators_mv: :pandas:`pandas.DataFrame<dataframe>`
List of MV generators
Columns:
* id: :obj:`int` (index column)
* electrical_capacity: :obj:`float` (unit: kW)
* generation_type: :obj:`str` (e.g. 'solar')
* generation_subtype: :obj:`str` (e.g. 'solar_roof_mounted')
* voltage level: :obj:`int` (range: 4..7)
* geom: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
* geom_em: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
generators_lv: :pandas:`pandas.DataFrame<dataframe>`
List of LV generators
Columns:
* id: :obj:`int` (index column)
* mvlv_subst_id: :obj:`int` (id of MV-LV substation in grid
= grid which the generator will be connected to)
* electrical_capacity: :obj:`float` (unit: kW)
* generation_type: :obj:`str` (e.g. 'solar')
* generation_subtype: :obj:`str` (e.g. 'solar_roof_mounted')
* voltage level: :obj:`int` (range: 4..7)
* geom: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
* geom_em: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
remove_missing: :obj:`bool`
If true, remove generators from grid which are not included in the imported dataset.
"""
# set capacity difference threshold
cap_diff_threshold = 10 ** -4
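# Capacities below are compared via abs(difference) < cap_diff_threshold, so
# float round-off in the source data (kW) does not trigger spurious updates.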
# get existing generators in MV and LV grids
g_mv, g_lv, g_mv_agg = _build_generator_list(network=network)
# print current capacity
capacity_grid = 0
capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_mv.iterrows()])
capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_lv.iterrows()])
capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_mv_agg.iterrows()])
logger.debug('Cumulative generator capacity (existing): {} kW'
.format(str(round(capacity_grid, 1)))
)
# ======================================
# Step 1: MV generators (existing + new)
# ======================================
logger.debug('==> MV generators')
logger.debug('{} generators imported.'
.format(str(len(generators_mv))))
# get existing genos (status quo DF format)
g_mv_existing = g_mv[g_mv['id'].isin(list(generators_mv.index.values))]
# get existing genos (new genos DF format)
generators_mv_existing = generators_mv[generators_mv.index.isin(list(g_mv_existing['id']))]
# remove existing ones from grid's geno list
g_mv = g_mv[~g_mv.isin(g_mv_existing)].dropna()
# TEMP: BACKUP 1 GENO FOR TESTING
#temp_geno = generators_mv_existing.iloc[0]
#temp_geno['geom_em'] = temp_geno['geom_em'].replace('10.667', '10.64')
# iterate over existing generators and check whether capacity has changed
log_geno_count = 0
log_geno_cap = 0
for id, row in generators_mv_existing.iterrows():
geno_existing = g_mv_existing[g_mv_existing['id'] == id]['obj'].iloc[0]
# check if capacity equals; if not: update capacity
if abs(row['electrical_capacity'] - \
geno_existing.nominal_capacity) < cap_diff_threshold:
continue
else:
log_geno_cap += row['electrical_capacity'] - geno_existing.nominal_capacity
log_geno_count += 1
geno_existing.nominal_capacity = row['electrical_capacity']
# check if cap=0 (this may happen if dp is buggy)
if row['electrical_capacity'] <= 0:
geno_existing.grid.graph.remove_node(geno_existing)
logger.warning('Capacity of generator {} is <=0, generator removed. '
'Check your data source.'
.format(repr(geno_existing))
)
logger.debug('Capacities of {} of {} existing generators updated ({} kW).'
.format(str(log_geno_count),
str(len(generators_mv_existing) - log_geno_count),
str(round(log_geno_cap, 1))
)
)
# new genos
log_geno_count = 0
log_geno_cap = 0
generators_mv_new = generators_mv[~generators_mv.index.isin(
list(g_mv_existing['id']))]
# remove them from grid's geno list
g_mv = g_mv[~g_mv.isin(list(generators_mv_new.index.values))].dropna()
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
#generators_mv_new = generators_mv_new.append(temp_geno)
# iterate over new generators and create them
for id, row in generators_mv_new.iterrows():
# check if geom is available, skip otherwise
geom = _check_geom(id, row)
if not geom:
logger.warning('Generator {} has no geom entry at all and will '
'not be imported!'.format(id))
continue
# create generator object and add it to MV grid's graph
if row['generation_type'] in ['solar', 'wind']:
network.mv_grid.graph.add_node(
GeneratorFluctuating(
id=id,
grid=network.mv_grid,
nominal_capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
weather_cell_id=row['w_id'],
geom=wkt_loads(geom)),
type='generator')
else:
network.mv_grid.graph.add_node(
Generator(id=id,
grid=network.mv_grid,
nominal_capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
geom=wkt_loads(geom)
),
type='generator')
log_geno_cap += row['electrical_capacity']
log_geno_count += 1
logger.debug('{} of {} new generators added ({} kW).'
.format(str(log_geno_count),
str(len(generators_mv_new)),
str(round(log_geno_cap, 1))
)
)
# remove decommissioned genos
# (genos which exist in grid but not in the new dataset)
log_geno_cap = 0
if not g_mv.empty and remove_missing:
log_geno_count = 0
for _, row in g_mv.iterrows():
log_geno_cap += row['obj'].nominal_capacity
row['obj'].grid.graph.remove_node(row['obj'])
log_geno_count += 1
logger.debug('{} of {} decommissioned generators removed ({} kW).'
.format(str(log_geno_count),
str(len(g_mv)),
str(round(log_geno_cap, 1))
)
)
# =============================================
# Step 2: LV generators (single existing units)
# =============================================
logger.debug('==> LV generators')
logger.debug('{} generators imported.'.format(str(len(generators_lv))))
# get existing genos (status quo DF format)
g_lv_existing = g_lv[g_lv['id'].isin(list(generators_lv.index.values))]
# get existing genos (new genos DF format)
generators_lv_existing = generators_lv[generators_lv.index.isin(list(g_lv_existing['id']))]
# TEMP: BACKUP 1 GENO FOR TESTING
# temp_geno = g_lv.iloc[0]
# remove existing ones from grid's geno list
g_lv = g_lv[~g_lv.isin(g_lv_existing)].dropna()
# iterate over existing generators and check whether capacity has changed
log_geno_count = 0
log_geno_cap = 0
for id, row in generators_lv_existing.iterrows():
geno_existing = g_lv_existing[g_lv_existing['id'] == id]['obj'].iloc[0]
# check if capacity equals; if not: update capacity
if abs(row['electrical_capacity'] - \
geno_existing.nominal_capacity) < cap_diff_threshold:
continue
else:
log_geno_cap += row['electrical_capacity'] - geno_existing.nominal_capacity
log_geno_count += 1
geno_existing.nominal_capacity = row['electrical_capacity']
logger.debug('Capacities of {} of {} existing generators (single units) updated ({} kW).'
.format(str(log_geno_count),
str(len(generators_lv_existing) - log_geno_count),
str(round(log_geno_cap, 1))
)
)
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
# g_lv.loc[len(g_lv)] = temp_geno
# remove decommissioned genos
# (genos which exist in grid but not in the new dataset)
log_geno_cap = 0
if not g_lv.empty and remove_missing:
log_geno_count = 0
for _, row in g_lv.iterrows():
log_geno_cap += row['obj'].nominal_capacity
row['obj'].grid.graph.remove_node(row['obj'])
log_geno_count += 1
logger.debug('{} of {} decommissioned generators (single units) removed ({} kW).'
.format(str(log_geno_count),
str(len(g_lv)),
str(round(log_geno_cap, 1))
)
)
# ====================================================================================
# Step 3: LV generators (existing in aggregated units (originally from aggregated LA))
# ====================================================================================
g_lv_agg = network.dingo_import_data
g_lv_agg_existing = g_lv_agg[g_lv_agg['id'].isin(list(generators_lv.index.values))]
generators_lv_agg_existing = generators_lv[generators_lv.index.isin(list(g_lv_agg_existing['id']))]
# TEMP: BACKUP 1 GENO FOR TESTING
# temp_geno = g_lv_agg.iloc[0]
g_lv_agg = g_lv_agg[~g_lv_agg.isin(g_lv_agg_existing)].dropna()
log_geno_count = 0
log_agg_geno_list = []
log_geno_cap = 0
for id, row in generators_lv_agg_existing.iterrows():
# check if capacity equals; if not: update capacity of agg. geno
cap_diff = row['electrical_capacity'] - \
g_lv_agg_existing[g_lv_agg_existing['id'] == id]['capacity'].iloc[0]
if abs(cap_diff) < cap_diff_threshold:
continue
else:
agg_geno = g_lv_agg_existing[g_lv_agg_existing['id'] == id]['agg_geno'].iloc[0]
agg_geno.nominal_capacity += cap_diff
log_geno_cap += cap_diff
log_geno_count += 1
log_agg_geno_list.append(agg_geno)
logger.debug('Capacities of {} of {} existing generators (in {} of {} aggregated units) '
'updated ({} kW).'
.format(str(log_geno_count),
str(len(generators_lv_agg_existing) - log_geno_count),
str(len(set(log_agg_geno_list))),
str(len(g_lv_agg_existing['agg_geno'].unique())),
str(round(log_geno_cap, 1))
)
)
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
# g_lv_agg.loc[len(g_lv_agg)] = temp_geno
# remove decommissioned genos
# (genos which exist in grid but not in the new dataset)
log_geno_cap = 0
if not g_lv_agg.empty and remove_missing:
log_geno_count = 0
for _, row in g_lv_agg.iterrows():
row['agg_geno'].nominal_capacity -= row['capacity']
log_geno_cap += row['capacity']
# remove LV geno id from id string of agg. geno
id = row['agg_geno'].id.split('-')
ids = id[2].split('_')
ids.remove(str(int(row['id'])))
row['agg_geno'].id = '-'.join([id[0], id[1], '_'.join(ids)])
# after removing the LV geno from agg geno, is the agg. geno empty?
# if yes, remove it from grid
if not ids:
row['agg_geno'].grid.graph.remove_node(row['agg_geno'])
log_geno_count += 1
logger.debug('{} of {} decommissioned generators in aggregated generators removed ({} kW).'
.format(str(log_geno_count),
str(len(g_lv_agg)),
str(round(log_geno_cap, 1))
)
)
# ====================================================================
# Step 4: LV generators (new single units + genos in aggregated units)
# ====================================================================
# new genos
log_geno_count =\
log_agg_geno_new_count =\
log_agg_geno_upd_count = 0
# TEMP: BACKUP 1 GENO FOR TESTING
#temp_geno = generators_lv[generators_lv.index == g_lv_existing.iloc[0]['id']]
generators_lv_new = generators_lv[~generators_lv.index.isin(list(g_lv_existing['id'])) &
~generators_lv.index.isin(list(g_lv_agg_existing['id']))]
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
#generators_lv_new = generators_lv_new.append(temp_geno)
# dict for new agg. generators
agg_geno_new = {}
# get LV grid districts
lv_grid_dict = _build_lv_grid_dict(network)
# get predefined random seed and initialize random generator
seed = int(network.config['grid_connection']['random_seed'])
random.seed(a=seed)
# check if none of new generators can be allocated to an existing LV grid
if not any([_ in lv_grid_dict.keys()
for _ in list(generators_lv_new['mvlv_subst_id'])]):
logger.warning('None of the imported generators can be allocated '
'to an existing LV grid. Check compatibility of grid '
'and generator datasets.')
# iterate over new (single unit or part of agg. unit) generators and create them
log_geno_cap = 0
for id, row in generators_lv_new.iterrows():
lv_geno_added_to_agg_geno = False
# new unit is part of agg. LA (mvlv_subst_id is different from existing
# ones in LV grids of non-agg. load areas)
if (row['mvlv_subst_id'] not in lv_grid_dict.keys() and
row['la_id'] and not isnan(row['la_id']) and
row['mvlv_subst_id'] and not isnan(row['mvlv_subst_id'])):
# check if new unit can be added to existing agg. generator
# (LA id, type and subtype match) -> update existing agg. generator.
# Normally, this case should not occur since `subtype` of new genos
# is set to a new value (e.g. 'solar')
for _, agg_row in g_mv_agg.iterrows():
if (agg_row['la_id'] == int(row['la_id']) and
agg_row['obj'].type == row['generation_type'] and
agg_row['obj'].subtype == row['generation_subtype']):
agg_row['obj'].nominal_capacity += row['electrical_capacity']
agg_row['obj'].id += '_{}'.format(str(id))
log_agg_geno_upd_count += 1
lv_geno_added_to_agg_geno = True
if not lv_geno_added_to_agg_geno:
la_id = int(row['la_id'])
if la_id not in agg_geno_new:
agg_geno_new[la_id] = {}
if row['voltage_level'] not in agg_geno_new[la_id]:
agg_geno_new[la_id][row['voltage_level']] = {}
if row['generation_type'] not in agg_geno_new[la_id][row['voltage_level']]:
agg_geno_new[la_id][row['voltage_level']][row['generation_type']] = {}
if row['generation_subtype'] not in \
agg_geno_new[la_id][row['voltage_level']][row['generation_type']]:
agg_geno_new[la_id][row['voltage_level']][row['generation_type']]\
.update({row['generation_subtype']: {'ids': [int(id)],
'capacity': row['electrical_capacity']
}
}
)
else:
agg_geno_new[la_id][row['voltage_level']][row['generation_type']] \
[row['generation_subtype']]['ids'].append(int(id))
agg_geno_new[la_id][row['voltage_level']][row['generation_type']] \
[row['generation_subtype']]['capacity'] += row['electrical_capacity']
# new generator is a single (non-aggregated) unit
else:
# check if geom is available
geom = _check_geom(id, row)
if row['generation_type'] in ['solar', 'wind']:
gen = GeneratorFluctuating(
id=id,
grid=None,
nominal_capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
weather_cell_id=row['w_id'],
geom=wkt_loads(geom) if geom else geom)
else:
gen = Generator(id=id,
grid=None,
nominal_capacity=row[
'electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
geom=wkt_loads(geom) if geom else geom)
# TEMP: REMOVE MVLV SUBST ID FOR TESTING
#row['mvlv_subst_id'] = None
# check if MV-LV substation id exists. if not, allocate to
# random one
lv_grid = _check_mvlv_subst_id(
generator=gen,
mvlv_subst_id=row['mvlv_subst_id'],
lv_grid_dict=lv_grid_dict)
gen.grid = lv_grid
lv_grid.graph.add_node(gen, type='generator')
log_geno_count += 1
log_geno_cap += row['electrical_capacity']
# there are new agg. generators to be created
if agg_geno_new:
pfac_mv_gen = network.config['reactive_power_factor']['mv_gen']
# add aggregated generators
for la_id, val in agg_geno_new.items():
for v_level, val2 in val.items():
for type, val3 in val2.items():
for subtype, val4 in val3.items():
if type in ['solar', 'wind']:
gen = GeneratorFluctuating(
id='agg-' + str(la_id) + '-' + '_'.join([
str(_) for _ in val4['ids']]),
grid=network.mv_grid,
nominal_capacity=val4['capacity'],
type=type,
subtype=subtype,
v_level=4,
# ToDo: get correct w_id
weather_cell_id=row['w_id'],
geom=network.mv_grid.station.geom)
else:
gen = Generator(
id='agg-' + str(la_id) + '-' + '_'.join([
str(_) for _ in val4['ids']]),
nominal_capacity=val4['capacity'],
type=type,
subtype=subtype,
geom=network.mv_grid.station.geom,
grid=network.mv_grid,
v_level=4)
network.mv_grid.graph.add_node(
gen, type='generator_aggr')
# select cable type
line_type, line_count = select_cable(
network=network,
level='mv',
apparent_power=gen.nominal_capacity /
pfac_mv_gen)
# connect generator to MV station
line = Line(id='line_aggr_generator_la_' + str(la_id) + '_vlevel_{v_level}_'
'{subtype}'.format(
v_level=v_level,
subtype=subtype),
type=line_type,
kind='cable',
quantity=line_count,
length=1e-3,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(network.mv_grid.station,
gen,
line=line,
type='line_aggr')
log_agg_geno_new_count += len(val4['ids'])
log_geno_cap += val4['capacity']
logger.debug('{} of {} new generators added ({} single units, {} to existing '
'agg. generators and {} units as new aggregated generators) '
'(total: {} kW).'
.format(str(log_geno_count +
log_agg_geno_new_count +
log_agg_geno_upd_count),
str(len(generators_lv_new)),
str(log_geno_count),
str(log_agg_geno_upd_count),
str(log_agg_geno_new_count),
str(round(log_geno_cap, 1))
)
)
def _check_geom(id, row):
"""Checks if a valid geom is available in dataset
If yes, this geom will be used.
If not:
* MV generators: use geom from EnergyMap.
* LV generators: set geom to None. It is re-set in
:func:`edisgo.data.import_data._check_mvlv_subst_id`
to MV-LV station's geom. EnergyMap's geom is not used
since it is more inaccurate than the station's geom.
Parameters
----------
id : :obj:`int`
Id of generator
row : :pandas:`pandas.Series<series>`
Generator dataset
Returns
-------
:shapely:`Shapely Point object<points>` or None
Geom of generator. None, if no geom is available.
"""
geom = None
# check if geom is available
if row['geom']:
geom = row['geom']
else:
# MV generators: set geom to EnergyMap's geom, if available
if int(row['voltage_level']) in [4,5]:
# check if original geom from Energy Map is available
if row['geom_em']:
geom = row['geom_em']
logger.debug('Generator {} has no geom entry, EnergyMap\'s geom entry will be used.'
.format(id)
)
return geom
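# Decision sketch: a valid 'geom' entry is used as-is; MV units (voltage
# level 4/5) without it fall back to EnergyMap's 'geom_em'; LV units without
# it return None and later receive the station geom in _check_mvlv_subst_id().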
def _check_mvlv_subst_id(generator, mvlv_subst_id, lv_grid_dict):
"""Checks if MV-LV substation id of single LV generator is missing or invalid.
If so, a random one from existing stations in LV grids will be assigned.
Parameters
----------
generator : :class:`~.grid.components.Generator`
LV generator
mvlv_subst_id : :obj:`int`
MV-LV substation id
lv_grid_dict : :obj:`dict`
Dict of existing LV grids
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
Returns
-------
:class:`~.grid.grids.LVGrid`
LV grid of generator
"""
if mvlv_subst_id and not isnan(mvlv_subst_id):
# assume that given LA exists
try:
# get LV grid
lv_grid = lv_grid_dict[mvlv_subst_id]
# if no geom, use geom of station
if not generator.geom:
generator.geom = lv_grid.station.geom
logger.debug('Generator {} has no geom entry, station\'s geom will be used.'
.format(generator.id)
)
return lv_grid
# if LA/LVGD does not exist, choose random LVGD and move generator to station of LVGD
# this occurs due to exclusion of LA with peak load < 1kW
except:
lv_grid = random.choice(list(lv_grid_dict.values()))
generator.geom = lv_grid.station.geom
logger.warning('Generator {} cannot be assigned to '
'non-existent LV Grid and was '
'allocated to a random LV Grid ({}); '
'geom was set to station\'s geom.'
.format(repr(generator),
repr(lv_grid)))
return lv_grid
else:
lv_grid = random.choice(list(lv_grid_dict.values()))
generator.geom = lv_grid.station.geom
logger.warning('Generator {} has no mvlv_subst_id and was '
'allocated to a random LV Grid ({}); '
'geom was set to station\'s geom.'
.format(repr(generator),
repr(lv_grid)))
return lv_grid
def _validate_generation():
"""Validate generators in updated grids
The validation uses the cumulative capacity of all generators.
"""
# ToDo: Validate conv. genos too!
# set capacity difference threshold
cap_diff_threshold = 10 ** -4
capacity_imported = generators_res_mv['electrical_capacity'].sum() + \
generators_res_lv['electrical_capacity'].sum() #+ \
#generators_conv_mv['capacity'].sum()
capacity_grid = 0
# MV genos
for geno in network.mv_grid.generators:
capacity_grid += geno.nominal_capacity
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
capacity_grid += geno.nominal_capacity
logger.debug('Cumulative generator capacity (updated): {} kW'
.format(str(round(capacity_imported, 1)))
)
if abs(capacity_imported - capacity_grid) > cap_diff_threshold:
raise ValueError('Cumulative capacity of imported generators ({} kW) '
'differ from cumulative capacity of generators '
'in updated grid ({} kW) by {} kW.'
.format(str(round(capacity_imported, 1)),
str(round(capacity_grid, 1)),
str(round(capacity_imported - capacity_grid, 1))
)
)
else:
logger.debug('Cumulative capacity of imported generators validated.')
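    # Worked example (illustrative numbers only): with capacity_imported = 1500.0 kW
    # and capacity_grid = 1499.99995 kW the absolute difference is 5e-5 kW, below the
    # 1e-4 kW threshold, so only the debug confirmation above is logged.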
def _validate_sample_geno_location():
if all(generators_res_lv['geom'].notnull()) \
and all(generators_res_mv['geom'].notnull()) \
and not generators_res_lv['geom'].empty \
and not generators_res_mv['geom'].empty:
# get geom of 1 random MV and 1 random LV generator and transform
sample_mv_geno_geom_shp = transform(proj2equidistant(network),
wkt_loads(generators_res_mv['geom']
.dropna()
.sample(n=1)
.item())
)
sample_lv_geno_geom_shp = transform(proj2equidistant(network),
wkt_loads(generators_res_lv['geom']
.dropna()
.sample(n=1)
.item())
)
# get geom of MV grid district
mvgd_geom_shp = transform(proj2equidistant(network),
network.mv_grid.grid_district['geom']
)
# check if MVGD contains geno
if not (mvgd_geom_shp.contains(sample_mv_geno_geom_shp) and
mvgd_geom_shp.contains(sample_lv_geno_geom_shp)):
raise ValueError('At least one imported generator is not located '
'in the MV grid area. Check compatibility of '
'grid and generator datasets.')
srid = int(network.config['geo']['srid'])
oedb_data_source = network.config['data_source']['oedb_data_source']
scenario = network.generator_scenario
if oedb_data_source == 'model_draft':
# load ORM names
orm_conv_generators_name = network.config['model_draft']['conv_generators_prefix'] + \
scenario + \
network.config['model_draft']['conv_generators_suffix']
orm_re_generators_name = network.config['model_draft']['re_generators_prefix'] + \
scenario + \
network.config['model_draft']['re_generators_suffix']
# import ORMs
orm_conv_generators = model_draft.__getattribute__(orm_conv_generators_name)
orm_re_generators = model_draft.__getattribute__(orm_re_generators_name)
# set dummy version condition (select all generators)
orm_conv_generators_version = 1 == 1
orm_re_generators_version = 1 == 1
elif oedb_data_source == 'versioned':
# load ORM names
orm_conv_generators_name = network.config['versioned']['conv_generators_prefix'] + \
scenario + \
network.config['versioned']['conv_generators_suffix']
orm_re_generators_name = network.config['versioned']['re_generators_prefix'] + \
scenario + \
network.config['versioned']['re_generators_suffix']
data_version = network.config['versioned']['version']
# import ORMs
orm_conv_generators = supply.__getattribute__(orm_conv_generators_name)
orm_re_generators = supply.__getattribute__(orm_re_generators_name)
# set version condition
orm_conv_generators_version = orm_conv_generators.columns.version == data_version
orm_re_generators_version = orm_re_generators.columns.version == data_version
# get conventional and renewable generators
with session_scope() as session:
#generators_conv_mv = _import_conv_generators(session)
generators_res_mv, generators_res_lv = _import_res_generators(
session)
#generators_mv = generators_conv_mv.append(generators_res_mv)
_validate_sample_geno_location()
_update_grids(network=network,
#generators_mv=generators_mv,
generators_mv=generators_res_mv,
generators_lv=generators_res_lv)
_validate_generation()
connect_mv_generators(network=network)
connect_lv_generators(network=network)
def _import_genos_from_pypsa(network, file):
"""Import generator data from a pyPSA file.
TBD
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
file: :obj:`str`
File including path
"""
raise NotImplementedError
# generators = pd.read_csv(file,
# comment='#',
# index_col='name',
# delimiter=',',
# decimal='.'
# )
def _build_generator_list(network):
"""Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators
"""
genos_mv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv_agg = pd.DataFrame(columns=
('la_id', 'id', 'obj'))
# MV genos
for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
la_id = int(geno.id.split('-')[1].split('_')[-1])
genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]
return genos_mv, genos_lv, genos_lv_agg
def _build_lv_grid_dict(network):
"""Creates dict of LV grids
LV grid ids are used as keys, LV grid references as values.
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Returns
-------
:obj:`dict`
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
"""
lv_grid_dict = {}
for lv_grid in network.mv_grid.lv_grids:
lv_grid_dict[lv_grid.id] = lv_grid
return lv_grid_dict
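# Minimal sketch of how the helpers above fit together (assumed call pattern, not
# copied from this module):
#   genos_mv, genos_lv, genos_lv_agg = _build_generator_list(network)
#   lv_grid_dict = _build_lv_grid_dict(network)
# The genos_* frames map generator ids to the resident Generator objects, while
# lv_grid_dict maps LV grid ids to LVGrid objects, which is the structure that
# _check_mvlv_subst_id expects.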
def import_feedin_timeseries(config_data, weather_cell_ids):
"""
Import RES feed-in time series data and process
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
weather_cell_ids : :obj:`list`
List of weather cell id's (integers) to obtain feed-in data for.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Feedin time series
"""
def _retrieve_timeseries_from_oedb(session):
"""Retrieve time series from oedb
"""
# ToDo: add option to retrieve subset of time series
# ToDo: find the reference power class for mvgrid/w_id and insert instead of 4
feedin_sqla = session.query(
orm_feedin.w_id,
orm_feedin.source,
orm_feedin.feedin). \
filter(orm_feedin.w_id.in_(weather_cell_ids)). \
filter(orm_feedin.power_class.in_([0, 4])). \
filter(orm_feedin_version)
feedin = pd.read_sql_query(feedin_sqla.statement,
session.bind,
index_col=['source', 'w_id'])
return feedin
if config_data['data_source']['oedb_data_source'] == 'model_draft':
orm_feedin_name = config_data['model_draft']['res_feedin_data']
orm_feedin = model_draft.__getattribute__(orm_feedin_name)
orm_feedin_version = 1 == 1
else:
orm_feedin_name = config_data['versioned']['res_feedin_data']
orm_feedin = supply.__getattribute__(orm_feedin_name)
orm_feedin_version = orm_feedin.version == config_data['versioned'][
'version']
with session_scope() as session:
feedin = _retrieve_timeseries_from_oedb(session)
feedin.sort_index(axis=0, inplace=True)
timeindex = pd.date_range('1/1/2011', periods=8760, freq='H')
recasted_feedin_dict = {}
for type_w_id in feedin.index:
recasted_feedin_dict[type_w_id] = feedin.loc[
type_w_id, :].values[0]
feedin = pd.DataFrame(recasted_feedin_dict, index=timeindex)
    # rename 'wind_onshore' to 'wind'
new_level = [_ if _ not in ['wind_onshore']
else 'wind' for _ in feedin.columns.levels[0]]
feedin.columns.set_levels(new_level, level=0, inplace=True)
feedin.columns.rename('type', level=0, inplace=True)
feedin.columns.rename('weather_cell_id', level=1, inplace=True)
return feedin
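# Sketch of the expected shape of the returned feed-in frame (assuming weather cell
# ids 1122 and 1123 were requested; the 'solar' label is only an example):
#   feedin = import_feedin_timeseries(config_data, [1122, 1123])
#   feedin.index    # hourly DatetimeIndex for 2011 (8760 steps)
#   feedin.columns  # MultiIndex with levels ('type', 'weather_cell_id'),
#                   # e.g. ('solar', 1122), ('wind', 1123)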
def import_load_timeseries(config_data, data_source, mv_grid_id=None,
year=None):
"""
Import load time series
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
data_source : str
        Specify type of data source. Available data sources are
         * 'oedb'
           Retrieve a load time series from the oedb. Note that this option
           currently has limitations (see the notes of the oedb helper below).
         * 'demandlib'
           Determine a load time series with the use of the demandlib.
           This calculates standard load profiles for 4 different sectors.
mv_grid_id : :obj:`str`
MV grid ID as used in oedb. Provide this if `data_source` is 'oedb'.
Default: None.
year : int
Year for which to generate load time series. Provide this if
`data_source` is 'demandlib'. Default: None.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Load time series
"""
def _import_load_timeseries_from_oedb(config_data, mv_grid_id):
"""
Retrieve load time series from oedb
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Load time series
Notes
------
This is currently not a valid option to retrieve load time series
since time series in the oedb are not differentiated by sector. An
issue concerning this has been created.
"""
if config_data['versioned']['version'] == 'model_draft':
orm_load_name = config_data['model_draft']['load_data']
orm_load = model_draft.__getattribute__(orm_load_name)
orm_load_areas_name = config_data['model_draft']['load_areas']
orm_load_areas = model_draft.__getattribute__(orm_load_areas_name)
orm_load_version = 1 == 1
else:
orm_load_name = config_data['versioned']['load_data']
# orm_load = supply.__getattribute__(orm_load_name)
# ToDo: remove workaround
orm_load = model_draft.__getattribute__(orm_load_name)
# orm_load_version = orm_load.version == config.data['versioned']['version']
orm_load_areas_name = config_data['versioned']['load_areas']
# orm_load_areas = supply.__getattribute__(orm_load_areas_name)
# ToDo: remove workaround
orm_load_areas = model_draft.__getattribute__(orm_load_areas_name)
# orm_load_areas_version = orm_load.version == config.data['versioned']['version']
orm_load_version = 1 == 1
with session_scope() as session:
load_sqla = session.query( # orm_load.id,
orm_load.p_set,
orm_load.q_set,
orm_load_areas.subst_id). \
join(orm_load_areas, orm_load.id == orm_load_areas.otg_id). \
filter(orm_load_areas.subst_id == mv_grid_id). \
filter(orm_load_version). \
distinct()
load = pd.read_sql_query(load_sqla.statement,
session.bind,
index_col='subst_id')
return load
def _load_timeseries_demandlib(config_data, year):
"""
Get normalized sectoral load time series
Time series are normalized to 1 kWh consumption per year
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
year : int
Year for which to generate load time series.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Load time series
"""
sectoral_consumption = {'h0': 1, 'g0': 1, 'i0': 1, 'l0': 1}
cal = Germany()
holidays = dict(cal.holidays(year))
e_slp = bdew.ElecSlp(year, holidays=holidays)
# multiply given annual demand with timeseries
elec_demand = e_slp.get_profile(sectoral_consumption)
# Add the slp for the industrial group
ilp = profiles.IndustrialLoadProfile(e_slp.date_time_index,
holidays=holidays)
# Beginning and end of workday, weekdays and weekend days, and scaling
# factors by default
elec_demand['i0'] = ilp.simple_profile(
sectoral_consumption['i0'],
am=datetime.time(config_data['demandlib']['day_start'].hour,
config_data['demandlib']['day_start'].minute, 0),
pm=datetime.time(config_data['demandlib']['day_end'].hour,
config_data['demandlib']['day_end'].minute, 0),
profile_factors=
{'week': {'day': config_data['demandlib']['week_day'],
'night': config_data['demandlib']['week_night']},
'weekend': {'day': config_data['demandlib']['weekend_day'],
'night': config_data['demandlib']['weekend_night']}})
        # Resample 15-minute values to hourly values
elec_demand = elec_demand.resample('H').mean()
return elec_demand
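    # Note (sketch, not part of the original module): the demandlib branch returns
    # hourly profiles normalised to an annual consumption of 1 kWh per sector, so a
    # caller scales them with the actual annual demand, e.g.
    #   profiles = import_load_timeseries(config_data, 'demandlib', year=2011)
    #   residential_ts = profiles['residential'] * annual_residential_demand_kwh
    # where annual_residential_demand_kwh is a hypothetical scaling value.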
if data_source == 'oedb':
load = _import_load_timeseries_from_oedb(config_data, mv_grid_id)
elif data_source == 'demandlib':
load = _load_timeseries_demandlib(config_data, year)
load.rename(columns={'g0': 'retail', 'h0': 'residential',
'l0': 'agricultural', 'i0': 'industrial'},
inplace=True)
return load
| agpl-3.0 |
ooici/coi-services | ion/services/sa/product/data_product_management_service.py | 1 | 77120 | #!/usr/bin/env python
__author__ = 'Maurice Manning'
from pyon.public import log, IonObject
from pyon.util.containers import DotDict
from pyon.core.object import IonObjectBase
from pyon.core.exception import BadRequest, NotFound
from pyon.public import RT, OT, PRED, LCS, CFG
from pyon.util.ion_time import IonTime
from pyon.ion.resource import ExtendedResourceContainer
from pyon.event.event import EventPublisher
from pyon.util.arg_check import validate_is_instance, validate_is_not_none, validate_false, validate_true
from pyon.net.endpoint import RPCClient
from ion.services.dm.utility.granule_utils import RecordDictionaryTool
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from ion.util.time_utils import TimeUtils
from ion.util.geo_utils import GeoUtils
from interface.services.sa.idata_product_management_service import BaseDataProductManagementService
from interface.objects import ComputedValueAvailability, DataProductTypeEnum, Dataset, CoverageTypeEnum, ParameterContext
from interface.objects import DataProduct, DataProductVersion, InformationStatus, DataProcess, DataProcessTypeEnum, Device
from coverage_model import QuantityType, ParameterDictionary, NumexprFunction, ParameterFunctionType
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from ion.services.dm.utility.test.parameter_helper import ParameterHelper
from pyon.datastore.datastore import DataStore
from pyon.datastore.datastore_query import DatastoreQueryBuilder, DQ
from lxml import etree
from datetime import datetime
import numpy as np
import string, StringIO
import networkx as nx
import matplotlib.pyplot as plt
from collections import deque
from pyon.core.governance import ORG_MANAGER_ROLE, DATA_OPERATOR, OBSERVATORY_OPERATOR, INSTRUMENT_OPERATOR, GovernanceHeaderValues, has_org_role
from pyon.core.exception import Inconsistent
import re
import functools
from pyon.util.breakpoint import debug_wrapper
class DataProductManagementService(BaseDataProductManagementService):
""" @author Bill Bollenbacher
@file ion/services/sa/product/data_product_management_service.py
@brief Implementation of the data product management service
"""
def on_init(self):
self.RR2 = EnhancedResourceRegistryClient(self.clients.resource_registry)
def create_data_product(self, data_product=None, stream_definition_id='', exchange_point='', dataset_id='', parent_data_product_id='', default_stream_configuration=None):
"""
Creates a data product Resource.
@param data_product - The data product resource
@param stream_definition_id - The stream definition points to the parameter dictionary and defines the
parameters for this data product
@param exchange_point - Which exchange point on the broker to use for streaming purposes
@param dataset_id - It's possible to create a data product from an already existing dataset,
this is the dataset to use and point to
@param parent_data_product_id - The id of the parent data product, this is for the case where a derived
data product is created
@param default_stream_configuration - A configuration for how to name the streams coming from the agent
"""
if data_product.category == DataProductTypeEnum.DEVICE:
return self.create_device_data_product(data_product, stream_definition_id, default_stream_configuration)
elif data_product.category == DataProductTypeEnum.SITE:
return self.create_site_data_product(data_product, stream_definition_id)
elif data_product.category == DataProductTypeEnum.DERIVED:
return self.create_derived_data_product(data_product, parent_data_product_id, stream_definition_id)
elif data_product.category == DataProductTypeEnum.EXTERNAL:
return self.create_external_data_product(data_product, stream_definition_id)
else:
raise BadRequest("Unrecognized Data Product Type")
def create_device_data_product(self, data_product=None, stream_definition_id='', stream_configuration=None):
'''
Creates a data product resource and a stream for the data product.
'''
if not data_product.category == DataProductTypeEnum.DEVICE:
raise BadRequest("Attempted to create a Device Data Product without the proper type category")
data_product_id = self.create_data_product_(data_product)
# WARNING: This creates a Stream as a side effect!!
self.assign_stream_definition_to_data_product(data_product_id=data_product_id,
stream_definition_id=stream_definition_id,
stream_configuration=stream_configuration)
return data_product_id
def create_derived_data_product(self, data_product=None, parent_data_product_id='', stream_definition_id=''):
'''
Creates a derived data product
'''
if not data_product.category == DataProductTypeEnum.DERIVED:
raise BadRequest("Attempted to create a Derived Data Product without the proper type category")
# Store the resource
data_product_id = self.create_data_product_(data_product)
# Associate the stream definition with the data product, BUT DONT MAKE A STREAM
self.RR2.assign_stream_definition_to_data_product_with_has_stream_definition(stream_definition_id,
data_product_id)
# Associate the data product to its parent
self.assign_data_product_to_data_product(data_product_id=data_product_id, parent_data_product_id=parent_data_product_id)
# Associate the dataset of the parent with this data product
dataset_ids, _ = self.clients.resource_registry.find_objects(parent_data_product_id, predicate=PRED.hasDataset, id_only=True)
for dataset_id in dataset_ids:
self.assign_dataset_to_data_product(data_product_id, dataset_id)
# If there were physical datasets
if dataset_ids:
self.create_catalog_entry(data_product_id)
self._check_qc(data_product_id)
return data_product_id
def create_site_data_product(self, data_product=None, stream_definition_id=''):
'''
Creates a site data product
'''
if not data_product.category == DataProductTypeEnum.SITE:
raise BadRequest("Attempted to create a Site Data Product without the proper type category")
# Store the resource
data_product_id = self.create_data_product_(data_product)
# Associate the stream definition with the data product, BUT DONT MAKE A STREAM
self.RR2.assign_stream_definition_to_data_product_with_has_stream_definition(stream_definition_id,
data_product_id)
return data_product_id
def create_external_data_product(self, data_product=None, stream_definition_id=''):
'''
Creates an external data product
'''
if not data_product.category == DataProductTypeEnum.EXTERNAL:
raise BadRequest("Attempted to create a External Data Product without the proper type category")
# Store the resource
data_product_id = self.create_data_product_(data_product)
# Associate the stream definition with the data product, BUT DONT MAKE A STREAM
self.RR2.assign_stream_definition_to_data_product_with_has_stream_definition(stream_definition_id,
data_product_id)
return data_product_id
def create_data_product_(self, data_product=None):
validate_is_not_none(data_product, 'A data product (ion object) must be passed to register a data product')
# if the geospatial_bounds is set then calculate the geospatial_point_center
if data_product and data_product.type_ == RT.DataProduct:
data_product.geospatial_point_center = GeoUtils.calc_geospatial_point_center(data_product.geospatial_bounds)
log.debug("create_data_product data_product.geospatial_point_center: %s" % data_product.geospatial_point_center)
#--------------------------------------------------------------------------------
# Register - create and store a new DataProduct resource using provided metadata
#--------------------------------------------------------------------------------
data_product_id = self.RR2.create(data_product, RT.DataProduct)
return data_product_id
def create_data_processes(self, data_product_id=''):
'''
For each data process launched also create a dataprocess for each parameter function in the data product
'''
data_product = self.read_data_product(data_product_id)
# DataProduct -> StreamDefinition
stream_def_ids, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasStreamDefinition, id_only=True)
pdict_ids = []
# StreamDefinition -> ParameterDictionary
for stream_def_id in stream_def_ids:
pd_ids, _ = self.clients.resource_registry.find_objects(stream_def_id, PRED.hasParameterDictionary, id_only=True)
pdict_ids.extend(pd_ids)
pd_ids = []
# ParameterDictionary -> ParameterContext
for pdict_id in pdict_ids:
pdef_ids, _ = self.clients.resource_registry.find_objects(pdict_id, PRED.hasParameterContext, id_only=True)
pd_ids.extend(pdef_ids)
pf_ids = []
pc_name_map = {}
# ParameterContext -> ParameterFunction
for pd_id in pd_ids:
pfunc_objs, _ = self.clients.resource_registry.find_objects(pd_id, PRED.hasParameterFunction, id_only=False)
for pfunc_obj in pfunc_objs:
pf_ids.append(pfunc_obj._id)
pc_name_map[pfunc_obj._id] = pfunc_obj.name
dpds = []
dpd_name_map = {}
# DataProcessDefinition -> ParameterFunction
for pf_id in pf_ids:
dpdef_objs, _ = self.clients.resource_registry.find_subjects(object=pf_id,
predicate=PRED.hasParameterFunction,
subject_type=RT.DataProcessDefinition,
id_only=False)
for dpdef_obj in dpdef_objs:
dpd_name_map[dpdef_obj._id] = pc_name_map[pf_id]
dpds.append(dpdef_obj)
for dpd in dpds:
dp = DataProcess()
#dp.name = 'Data Process %s for Data Product %s' % ( dpd.name, data_product.name )
dp.name = dpd_name_map[dpd._id]
# TODO: This is a stub until DPD is ready
dp_id, _ = self.clients.resource_registry.create(dp)
self.clients.resource_registry.create_association(dpd._id, PRED.hasDataProcess, dp_id)
self.clients.resource_registry.create_association(dp_id, PRED.hasOutputProduct, data_product._id)
def _check_qc(self, data_product_id):
'''
Creates the necessary QC parameters where the "qc_applications" attribute is specified.
'''
data_product = self.read_data_product(data_product_id)
parameters = self.get_data_product_parameters(data_product_id)
parameter_names = [p.name for p in parameters]
pmap = {}
# Make a map from the ooi short name to the parameter object
for p in parameters:
if p.ooi_short_name:
pmap[re.sub(r'_L[0-2]', '', p.ooi_short_name)] = p
for sname, qc_applicability in data_product.qc_applications.iteritems():
parameter_list = self._generate_qc(pmap[sname], qc_applicability)
for parameter in parameter_list:
if parameter.name in parameter_names: # Parameter already exists
continue
parameter_id = self.clients.dataset_management.create_parameter(parameter)
self.add_parameter_to_data_product(parameter_id, data_product_id)
return True
def _generate_qc(self, parameter, qc_list):
sname = parameter.ooi_short_name
# DATAPROD_ALGORTHM_QC
# drop the _L?
sname = re.sub(r'_L[0-2]', '', sname)
retval = []
for qc_thing in qc_list:
if qc_thing not in ('qc_glblrng', 'qc_gradtst', 'qc_trndtst', 'qc_spketst', 'qc_loclrng', 'qc_stuckvl'):
log.warning("Invalid QC: %s", qc_thing)
continue
qc_thing = qc_thing.replace('qc_', '')
new_param_name = '_'.join([sname, qc_thing.upper(), 'QC'])
parameter = ParameterContext(new_param_name.lower(),
parameter_type='quantity',
value_encoding='int8',
ooi_short_name=new_param_name,
display_name=' '.join([sname, qc_thing.upper()]),
units='1',
description=' '.join([qc_thing.upper(), 'Quality Control for', sname]),
fill_value=-88
)
retval.append(parameter)
return retval
'''
{ TEMPWAT_L1: [qc_glblrng, qc_gradtst],
DENSITY_L2: [qc_glblrng] }
'''
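    # Sketch of what _generate_qc produces for the example above (names follow the
    # '<SNAME>_<TEST>_QC' pattern built in _generate_qc; ids are illustrative):
    #   TEMPWAT with ['qc_glblrng', 'qc_gradtst'] yields ParameterContexts named
    #   'tempwat_glblrng_qc' and 'tempwat_gradtst_qc' (int8, fill_value -88), which
    #   _check_qc registers and adds to the data product.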
def assign_stream_definition_to_data_product(self, data_product_id='', stream_definition_id='', exchange_point='', stream_configuration=None):
validate_is_not_none(data_product_id, 'A data product id must be passed to register a data product')
validate_is_not_none(stream_definition_id, 'A stream definition id must be passed to assign to a data product')
stream_def_obj = self.clients.pubsub_management.read_stream_definition(stream_definition_id) # Validates and checks for param_dict
parameter_dictionary = stream_def_obj.parameter_dictionary
validate_is_not_none(parameter_dictionary, 'A parameter dictionary must be passed to register a data product')
exchange_point = exchange_point or 'science_data'
data_product = self.RR2.read(data_product_id)
#if stream_definition_id:
#@todo: What about topics?
# Associate the StreamDefinition with the data product
self.RR2.assign_stream_definition_to_data_product_with_has_stream_definition(stream_definition_id,
data_product_id)
stream_name = ''
stream_type = ''
if stream_configuration is not None:
stream_name = stream_configuration.stream_name
stream_type = stream_configuration.stream_type
stream_id, route = self.clients.pubsub_management.create_stream(name=data_product.name,
exchange_point=exchange_point,
description=data_product.description,
stream_definition_id=stream_definition_id,
stream_name=stream_name,
stream_type=stream_type)
# Associate the Stream with the main Data Product and with the default data product version
self.RR2.assign_stream_to_data_product_with_has_stream(stream_id, data_product_id)
def assign_dataset_to_data_product(self, data_product_id='', dataset_id=''):
validate_is_not_none(data_product_id, 'A data product id must be passed to assign a dataset to a data product')
validate_is_not_none(dataset_id, 'A dataset id must be passed to assign a dataset to a data product')
self.RR2.assign_dataset_to_data_product_with_has_dataset(dataset_id, data_product_id)
def assign_data_product_to_data_product(self, data_product_id='', parent_data_product_id=''):
validate_true(data_product_id, 'A data product id must be specified')
validate_true(parent_data_product_id, 'A data product id must be specified')
self.RR2.assign_data_product_to_data_product_with_has_data_product_parent(parent_data_product_id, data_product_id)
def read_data_product(self, data_product_id=''):
"""
method docstring
"""
# Retrieve all metadata for a specific data product
# Return data product resource
data_product = self.RR2.read(data_product_id, RT.DataProduct)
return data_product
def update_data_product(self, data_product=None):
"""
@todo document this interface!!!
@param data_product DataProduct
@throws NotFound object with specified id does not exist
"""
# if the geospatial_bounds is set then calculate the geospatial_point_center
if data_product and data_product.type_ == RT.DataProduct:
data_product.geospatial_point_center = GeoUtils.calc_geospatial_point_center(data_product.geospatial_bounds)
original = self.RR2.read(data_product._id)
self.RR2.update(data_product, RT.DataProduct)
if self._metadata_changed(original, data_product):
self.update_catalog_entry(data_product._id)
if self._qc_application_changed(original, data_product):
self._check_qc(data_product._id)
def _metadata_changed(self, original_dp, new_dp):
from ion.processes.data.registration.registration_process import RegistrationProcess
for field in RegistrationProcess.catalog_metadata:
if hasattr(original_dp, field) and getattr(original_dp, field) != getattr(new_dp, field):
return True
return False
def _qc_application_changed(self, original_dp, new_dp):
retval = original_dp.qc_applications != new_dp.qc_applications
return retval
def delete_data_product(self, data_product_id=''):
#--------------------------------------------------------------------------------
# suspend persistence
#--------------------------------------------------------------------------------
if self.is_persisted(data_product_id):
self.suspend_data_product_persistence(data_product_id)
#--------------------------------------------------------------------------------
# remove stream associations
#--------------------------------------------------------------------------------
stream_ids, assoc_ids = self.clients.resource_registry.find_objects(data_product_id, PRED.hasStream, RT.Stream, True)
        for stream, assoc in zip(stream_ids, assoc_ids):
            self.clients.resource_registry.delete_association(assoc)
            self.clients.pubsub_management.delete_stream(stream)
#--------------------------------------------------------------------------------
# retire the data product
#--------------------------------------------------------------------------------
self.RR2.lcs_delete(data_product_id, RT.DataProduct)
def force_delete_data_product(self, data_product_id=''):
#get the assoc producers before deleteing the links
producer_ids = self.RR2.find_data_producer_ids_of_data_product_using_has_data_producer(data_product_id)
for producer_id in producer_ids:
self.RR2.delete(producer_id)
self.RR2.force_delete(data_product_id, RT.DataProduct)
def find_data_products(self, filters=None):
"""
method docstring
"""
# Validate the input filter and augment context as required
# Define set of resource attributes to filter on, change parameter from "filter" to include attributes and filter values.
# potentially: title, keywords, date_created, creator_name, project, geospatial coords, time range
# Call DM DiscoveryService to query the catalog for matches
# Organize and return the list of matches with summary metadata (title, summary, keywords)
ret, _ = self.clients.resource_registry.find_resources(RT.DataProduct, None, None, False)
return ret
def get_data_product_updates(self, data_product_id_list=None, since_timestamp="" ):
# For a list of data products, retrieve events since the given timestamp. The return is a dict
# of dp_id to a dict containing dataset_id, updated: boolean, current geospatial bounds, current temporal bounds.
event_objs = None
response_data = {}
# get the passed parameters. At least the data product id and start_time
# should be specified
if data_product_id_list == None or len(data_product_id_list) == 0:
raise BadRequest("Please pass a valid data_product_id")
# Build query structure for fetching events for the data products passed
try:
dqb = DatastoreQueryBuilder(datastore=DataStore.DS_EVENTS, profile=DataStore.DS_PROFILE.EVENTS)
filter_origins = dqb.in_(DQ.EA_ORIGIN, *data_product_id_list)
filter_types = dqb.in_(DQ.ATT_TYPE, "ResourceModifiedEvent")
filter_mindate = dqb.gte(DQ.RA_TS_CREATED, since_timestamp)
where = dqb.and_(filter_origins, filter_types, filter_mindate)
order_by = dqb.order_by([["ts_created", "desc"]]) # Descending order by time
dqb.build_query(where=where, order_by=order_by, limit=100000, skip=0, id_only=False)
query = dqb.get_query()
event_objs = self.container.event_repository.event_store.find_by_query(query)
except Exception as ex:
log.error("Error querying for events for specified data products: %s", ex.message)
event_objs = []
# Start populating the response structure
for data_product_id in data_product_id_list:
response_data[data_product_id] = {#"dataset_id" : None,
"updated" : False,
"current_geospatial_bounds" : None,
"current_temporal_bounds" : None}
""" Commented till Maurice decides he needs the dataset ids in the response
# Need dataset id in response data
ds_ids,_ = self.clients.resource_registry.find_objects(subject=data_product_id,
predicate=PRED.hasDataset,
id_only=True)
if (ds_ids and len(ds_ids) > 0):
response_data[data_product_id]["dataset_id"] = ds_ids[0]
"""
# if we find any UPDATE event for this data_product_id. This type of dumb iteration
# is slow but since the returned events for all data_product_ids in the list are returned
# as one big list, there is no way to narrow down the search for UPDATE events
for event_obj in event_objs:
if event_obj.origin == data_product_id and event_obj.sub_type == "UPDATE":
response_data[data_product_id]["updated"] = True
continue
# Get information about the current geospatial and temporal bounds
dp_obj = self.clients.resource_registry.read(data_product_id)
if dp_obj:
response_data[data_product_id]["current_geospatial_bounds"] = dp_obj.geospatial_bounds
response_data[data_product_id]["current_temporal_bounds"] = dp_obj.nominal_datetime
return response_data
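    # Shape of the structure returned above (sketch; the id is a placeholder):
    #   { '<data_product_id>': {'updated': <bool>,
    #                           'current_geospatial_bounds': <dp.geospatial_bounds>,
    #                           'current_temporal_bounds': <dp.nominal_datetime>} }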
def create_dataset_for_data_product(self, data_product_id=''):
'''
Create a dataset for a data product
'''
#-----------------------------------------------------------------------------------------
# Step 1: Collect related resources
data_product_obj = self.RR2.read(data_product_id)
validate_is_not_none(data_product_obj, "The data product id should correspond to a valid registered data product.")
stream_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)
if data_product_obj.category == DataProductTypeEnum.DEVICE:
if not stream_ids:
raise BadRequest('Specified DataProduct has no streams associated with it')
stream_id = stream_ids[0]
else:
stream_id = None
stream_defs, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStreamDefinition,id_only=True)
if not stream_defs:
raise BadRequest("Data Product stream is without a stream definition")
stream_def_id = stream_defs[0]
parameter_dictionary_ids, _ = self.clients.resource_registry.find_objects(stream_def_id, PRED.hasParameterDictionary, id_only=True)
if not parameter_dictionary_ids:
raise BadRequest("Data Product stream is without a parameter dictionary")
parameter_dictionary_id = parameter_dictionary_ids[0]
parent_data_product_ids, _ = self.clients.resource_registry.find_objects(data_product_id, predicate=PRED.hasDataProductParent, id_only=True)
if len(parent_data_product_ids) == 1: # This is a child data product
raise BadRequest("Child Data Products shouldn't be activated")
child_data_product_ids, _ = self.clients.resource_registry.find_subjects(object=data_product_id, predicate=PRED.hasDataProductParent, id_only=True)
dataset_ids, _ = self.clients.resource_registry.find_objects(data_product_id, predicate=PRED.hasDataset, id_only=True)
#-----------------------------------------------------------------------------------------
# Step 2: Create and associate Dataset (coverage)
# If there's already a dataset, just return that
if dataset_ids:
return dataset_ids[0]
dataset_id = self._create_dataset(data_product_obj, parameter_dictionary_id)
# Also assign the stream to the dataset
if stream_id:
self.RR2.assign_stream_to_dataset_with_has_stream(stream_id, dataset_id)
# link dataset with data product. This creates the association in the resource registry
self.RR2.assign_dataset_to_data_product_with_has_dataset(dataset_id, data_product_id)
# Link this dataset with the child data products AND
# create catalog entries for the child data products
for child_dp_id in child_data_product_ids:
self.assign_dataset_to_data_product(child_dp_id, dataset_id)
            self.create_catalog_entry(data_product_id=child_dp_id)
# register the dataset for externalization
self.create_catalog_entry(data_product_id=data_product_id)
return dataset_id
def _create_dataset(self, data_product, parameter_dictionary_id):
# Device -> Simplex, Site -> Complex
if data_product.category == DataProductTypeEnum.DEVICE:
dataset = Dataset(name=data_product.name,
description='Dataset for Data Product %s' % data_product._id,
coverage_type=CoverageTypeEnum.SIMPLEX)
elif data_product.category == DataProductTypeEnum.SITE:
dataset = Dataset(name=data_product.name,
description='Dataset for Data Product %s' % data_product._id,
coverage_type=CoverageTypeEnum.COMPLEX)
# No datasets are currently linked which means we need to create a new one
dataset_id = self.clients.dataset_management.create_dataset(dataset,
parameter_dictionary_id=parameter_dictionary_id)
return dataset_id
def activate_data_product_persistence(self, data_product_id=''):
"""Persist data product data into a data set
@param data_product_id str
@throws NotFound object with specified id does not exist
"""
#-----------------------------------------------------------------------------------------
# Step 1: Collect related resources
data_product_obj = self.RR2.read(data_product_id)
validate_is_not_none(data_product_obj, "The data product id should correspond to a valid registered data product.")
stream_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)
if not stream_ids:
raise BadRequest('Specified DataProduct has no streams associated with it')
stream_id = stream_ids[0]
dataset_id = self.create_dataset_for_data_product(data_product_id)
#-----------------------------------------------------------------------------------------
# Step 2: Configure and start ingestion with lookup values
# grab the ingestion configuration id from the data_product in order to use to persist it
if data_product_obj.dataset_configuration_id:
ingestion_configuration_id = data_product_obj.dataset_configuration_id
else:
ingestion_configuration_id = self.clients.ingestion_management.list_ingestion_configurations(id_only=True)[0]
# Identify lookup tables
config = DotDict()
if self._has_lookup_values(data_product_id):
config.process.input_product = data_product_id
config.process.lookup_docs = self._get_lookup_documents(data_product_id)
# persist the data stream using the ingestion config id and stream id
# find datasets for the data product
dataset_id = self.clients.ingestion_management.persist_data_stream(stream_id=stream_id,
ingestion_configuration_id=ingestion_configuration_id,
dataset_id=dataset_id,
config=config)
#--------------------------------------------------------------------------------
# todo: dataset_configuration_obj contains the ingest config for now...
# Update the data product object and sent event
#--------------------------------------------------------------------------------
data_product_obj.dataset_configuration_id = ingestion_configuration_id
self.update_data_product(data_product_obj)
self._publish_persist_event(data_product_id=data_product_id, persist_on = True)
self.create_data_processes(data_product_id)
def is_persisted(self, data_product_id=''):
# Is the data product currently persisted into a data set?
try:
if data_product_id:
stream_id = self.RR2.find_stream_id_of_data_product_using_has_stream(data_product_id)
return self.clients.ingestion_management.is_persisted(stream_id)
except NotFound:
pass
return False
def _publish_persist_event(self, data_product_id=None, persist_on=True):
try:
if data_product_id:
if persist_on:
persist_type = 'PERSIST_ON'
description = 'Data product is persisted.'
else:
persist_type= 'PERSIST_OFF'
description= 'Data product is not currently persisted'
pub = EventPublisher(OT.InformationContentStatusEvent, process=self)
event_data = dict(origin_type=RT.DataProduct,
origin=data_product_id or "",
sub_type=persist_type,
status = InformationStatus.NORMAL,
description = description)
pub.publish_event(**event_data)
except Exception as ex:
log.error("Error publishing InformationContentStatusEvent for data product: %s", data_product_id)
def suspend_data_product_persistence(self, data_product_id=''):
"""Suspend data product data persistence into a data set, multiple options
@param data_product_id str
@throws NotFound object with specified id does not exist
"""
#--------------------------------------------------------------------------------
# retrieve the data_process object
#--------------------------------------------------------------------------------
data_product_obj = self.clients.resource_registry.read(data_product_id)
validate_is_not_none(data_product_obj, 'Should not have been empty')
validate_is_instance(data_product_obj, DataProduct)
parent_dp_ids, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasDataProductParent, id_only=True)
if not data_product_obj.dataset_configuration_id:
if parent_dp_ids:
# It's a derived data product, we're done here
return
raise NotFound("Data Product %s dataset configuration does not exist" % data_product_id)
#--------------------------------------------------------------------------------
# get the Stream associated with this data product; if no stream then create one, if multiple streams then Throw
#streams = self.data_product.find_stemming_stream(data_product_id)
#--------------------------------------------------------------------------------
# if this data product is not currently being persisted, then just flag with a warning.
if self.is_persisted(data_product_id):
try:
log.debug("Attempting to find stream")
stream_id = self.RR2.find_stream_id_of_data_product_using_has_stream(data_product_id)
log.debug("stream found")
validate_is_not_none(stream_id, 'Data Product %s must have one stream associated' % str(data_product_id))
self.clients.ingestion_management.unpersist_data_stream(stream_id=stream_id,
ingestion_configuration_id=data_product_obj.dataset_configuration_id)
self._publish_persist_event(data_product_id=data_product_id, persist_on=False)
except NotFound:
if data_product_obj.lcstate == LCS.DELETED:
log.debug("stream not found, but assuming it was from a deletion")
log.error("Attempted to suspend_data_product_persistence on a retired data product")
else:
log.debug("stream not found, assuming error")
raise
else:
log.warning('Data product is not currently persisted, no action taken: %s', data_product_id)
def add_parameter_to_data_product(self, parameter_context_id='', data_product_id=''):
        data_product = self.read_data_product(data_product_id)
pc = self.clients.dataset_management.read_parameter_context(parameter_context_id)
stream_def_ids, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasStreamDefinition, id_only=False)
stream_def = stream_def_ids[0]
pdict_ids, _ = self.clients.resource_registry.find_objects(stream_def._id, PRED.hasParameterDictionary,id_only=True)
pdict_id = pdict_ids[0]
self.clients.resource_registry.create_association(subject=pdict_id, predicate=PRED.hasParameterContext, object=parameter_context_id)
if stream_def.available_fields:
stream_def.available_fields.append(pc.name)
self.clients.resource_registry.update(stream_def)
datasets, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasDataset, id_only=True)
if datasets:
dataset_id = datasets[0]
self.clients.dataset_management.add_parameter_to_dataset(parameter_context_id, dataset_id)
self.update_catalog_entry(data_product_id)
#--------------------------------------------------------------------------------
# detach the dataset from this data product
#--------------------------------------------------------------------------------
# dataset_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasDataset, id_only=True)
# for dataset_id in dataset_ids:
# self.data_product.unlink_data_set(data_product_id, dataset_id)
def _get_reference_designator(self, data_product_id=''):
'''
Returns the reference designator for a data product if it has one
'''
device_ids, _ = self.clients.resource_registry.find_subjects(object=data_product_id, predicate=PRED.hasOutputProduct, subject_type=RT.InstrumentDevice, id_only=True)
if not device_ids:
raise BadRequest("No instrument device associated with this data product")
device_id = device_ids[0]
sites, _ = self.clients.resource_registry.find_subjects(object=device_id, predicate=PRED.hasDevice, subject_type=RT.InstrumentSite, id_only=False)
if not sites:
raise BadRequest("No site is associated with this data product")
site = sites[0]
rd = site.reference_designator
return rd
def get_data_product_stream_definition(self, data_product_id=''):
self.read_data_product(data_product_id)
streams, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)
for stream in streams:
stream_defs, _ = self.clients.resource_registry.find_objects(subject=stream, predicate=PRED.hasStreamDefinition, id_only=True)
if stream_defs:
return stream_defs[0]
def get_data_product_provenance(self, data_product_id=''):
# Retrieve information that characterizes how this data was produced
# Return in a dictionary
# There are two parts to the provenance tree as returned by this method. The first part is
# a path that follows the DataProduct to its parent DataProduct .. all the way to the InstrumentAgent
# The second part is along the parameters contained within the DataProducts. The Provenance along the
# parameters can follow its own pathways.
self.provenance_results = {}
validate_is_not_none(data_product_id, 'A data product identifier must be passed to create a provenance report')
# Walk up the DataProduct tree
def resource_traversal(resource_id, result):
#Get data product object to verify it exists and what type it is
if resource_id not in result.keys():
result[resource_id] = {'parents' : {}, 'type' : None, 'parameter_provenance': {}}
#parent_info = []
# determine the type of resource. This will determine what parents to look for
resource_obj = self.clients.resource_registry.read(resource_id)
            if resource_obj is None:
                raise BadRequest('Resource object does not exist.')
result[resource_id]['type'] = resource_obj.type_
if result[resource_id]['type'] == "InstrumentDevice" or \
result[resource_id]['type'] == 'PlatformDevice':
# Do nothing. We have reached the top of the tree
return
# if the resource is a dataproduct, check for parent dataproduct, data process or Instrument/Platform/Dataset Agent
if result[resource_id]['type'] == "DataProduct":
# If its a derived data product, it should have a parent
parent_data_product_ids,_ = self.clients.resource_registry.find_objects(subject=resource_id,
object_type=RT.DataProduct,
predicate=PRED.hasDataProductParent,
id_only=True)
# recurse if we found data product parents
if(parent_data_product_ids !=None and len(parent_data_product_ids) > 0):
for _id in parent_data_product_ids:
result[resource_id]['parents'][_id] = {'data_process_definition_id' : None,
'data_process_definition_name' : None,
'data_process_definition_rev' : None}
#recurse
for _id in parent_data_product_ids:
resource_traversal(_id, result)
#return
# Code reaches here if no parents were found.
# Try the hasOutputProduct association with a dataprocess (from a transform
# func or retrieve process etc)
parent_data_process_ids,_ = self.clients.resource_registry.find_subjects(object=resource_id,
subject_type=RT.DataProcess,
predicate=PRED.hasOutputProduct,
id_only=True)
# Add the data Process definitions as parents and their input data product as the parent
if (parent_data_process_ids != None and len(parent_data_process_ids) > 0):
for parent_process_id in parent_data_process_ids:
parent_dpd_objs,_ = self.clients.resource_registry.find_subjects(object=parent_process_id,
subject_type=RT.DataProcessDefinition,
predicate=PRED.hasDataProcess,
id_only=False)
if (parent_dpd_objs == None or len(parent_dpd_objs) == 0):
raise BadRequest('Could not locate Data Process Definition')
#Check to see what type of data_process_type was associated with it. If its a TRANSFORM_PROCESS
# or RETRIEVE_PROCESS, treat it as a parent. If its a PARAMETER_FUNCTION, add it to the parameter
# provenance.
if parent_dpd_objs[0].data_process_type == DataProcessTypeEnum.TRANSFORM_PROCESS or \
parent_dpd_objs[0].data_process_type == DataProcessTypeEnum.RETRIEVE_PROCESS :
# Whats the input data product ?
input_to_data_process_ids,_ = self.clients.resource_registry.find_objects(subject=parent_process_id,
object_type=RT.DataProduct,
predicate=PRED.hasInputProduct,
id_only=True)
# Add information about the parent
for _id in input_to_data_process_ids:
result[resource_id]['parents'][_id] = {'data_process_definition_id' : parent_dpd_objs[0]._id,
'data_process_definition_name' : parent_dpd_objs[0].name,
'data_process_definition_rev' : parent_dpd_objs[0]._rev}
# recurse
for _id in input_to_data_process_ids:
resource_traversal(_id, result)
# In case of a parameter function, follow parameter provenance
if parent_dpd_objs[0].data_process_type == DataProcessTypeEnum.PARAMETER_FUNCTION:
input_params = parent_dpd_objs[0].parameters
result[resource_id]['parameter_provenance'][parent_process_id] = \
{'data_process_definition_id' : parent_dpd_objs[0]._id,
'data_process_definition_name' : parent_dpd_objs[0].name,
'data_process_definition_rev' : parent_dpd_objs[0]._rev,
'parameters' : input_params}
#return
# If code reaches here, we still have not found any parents, maybe we have reached a parsed data product,
# in which case we want to follow a link to an InstrumentDevice or PlatformDeveice
# via a hasOutputProduct predicate
instrument_device_ids,_ = self.clients.resource_registry.find_subjects(object=resource_id,
subject_type=RT.InstrumentDevice,
predicate=PRED.hasOutputProduct,
id_only=True)
platform_device_ids,_ = self.clients.resource_registry.find_subjects(object=resource_id,
subject_type=RT.PlatformDevice,
predicate=PRED.hasOutputProduct,
id_only=True)
source_device_ids = instrument_device_ids + platform_device_ids
if (source_device_ids != None and len(source_device_ids) > 0):
for _id in source_device_ids:
result[resource_id]['parents'][_id]={'data_process_definition_id' : None,
'data_process_definition_name' : None,
'data_process_definition_rev' : None}
for _id in source_device_ids:
resource_traversal(_id, result)
#return
else:
# log an error for not being able to find the source instrument
log.error("Could not locate the source device for :" + resource_id)
resource_traversal(data_product_id, self.provenance_results)
# We are actually interested in the DataProcessDefinitions for the DataProcess. Find those
return self.provenance_results
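    # Sketch of the provenance dictionary built above (resource ids are placeholders):
    #   { '<data_product_id>': {'type': 'DataProduct',
    #                           'parents': {'<parent_dp_or_device_id>': {
    #                               'data_process_definition_id': ...,
    #                               'data_process_definition_name': ...,
    #                               'data_process_definition_rev': ...}},
    #                           'parameter_provenance': {'<data_process_id>': {...}}},
    #     '<parent_or_device_id>': {...} }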
def get_data_product_parameter_provenance(self, data_product_id='', parameter_name=''):
# Provides an audit trail for modifications to the original data
provenance_image = StringIO.StringIO()
#first get the assoc stream definition
stream_def_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStreamDefinition, object_type=RT.StreamDefinition)
if not stream_def_ids:
raise BadRequest('No stream definitions found for this data product: %s', data_product_id)
else:
param_dict_ids, _ = self.clients.resource_registry.find_objects(subject=stream_def_ids[0], predicate=PRED.hasParameterDictionary, object_type=RT.ParameterDictionary)
if not param_dict_ids:
raise BadRequest('No parameter dictionary found for this data product: %s', data_product_id)
else:
#context = self.clients.dataset_management.read_parameter_context_by_name(parameter_context_id)
pdict = DatasetManagementService.get_parameter_dictionary(param_dict_ids[0]._id)
context = pdict.get_context(parameter_name)
#log.debug('get_data_product_parameter_provenance context: %s ', context)
if hasattr(context, 'param_type'):
graph = context.param_type.get_dependency_graph()
pos=nx.spring_layout(graph)
nx.draw(graph, pos, font_size=10)
plt.savefig(provenance_image)
provenance_image.seek(0)
else:
                    raise BadRequest('Invalid parameter context found for this data product: %s', data_product_id)
return provenance_image.getvalue()
def get_data_product_parameters(self, data_product_id='', id_only=False):
stream_defs, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasStreamDefinition, id_only=False)
if not stream_defs:
raise BadRequest("No Stream Definition Found for data product %s" % data_product_id)
stream_def = stream_defs[0]
pdicts, _ = self.clients.resource_registry.find_objects(stream_def._id, PRED.hasParameterDictionary, id_only=True)
if not pdicts:
raise BadRequest("No Parameter Dictionary Found for data product %s" % data_product_id)
pdict_id = pdicts[0]
parameters, _ = self.clients.resource_registry.find_objects(pdict_id, PRED.hasParameterContext, id_only=False)
if not parameters:
raise NotFound("No parameters are associated with this data product")
# too complicated for one line of code
#retval = { p.name : p._id for p in parameters if not filtered or (filtered and p in stream_def.available_fields) }
param_id = lambda x, id_only : x._id if id_only else x
retval = []
for p in parameters:
if (stream_def.available_fields and p.name in stream_def.available_fields) or not stream_def.available_fields:
retval.append(param_id(p, id_only))
return retval
def _registration_rpc(self, op, data_product_id):
procs,_ = self.clients.resource_registry.find_resources(restype=RT.Process, id_only=True)
pid = None
for p in procs:
if 'registration_worker' in p:
pid = p
if not pid:
log.warning('No registration worker found')
return
rpc_cli = RPCClient(to_name=pid)
return rpc_cli.request({'data_product_id':data_product_id}, op=op)
def create_catalog_entry(self, data_product_id=''):
return self._registration_rpc('create_entry',data_product_id)
def read_catalog_entry(self, data_product_id=''):
return self._registration_rpc('read_entry', data_product_id)
def update_catalog_entry(self, data_product_id=''):
return self._registration_rpc('update_entry', data_product_id)
def delete_catalog_entry(self, data_product_id=''):
return self._registration_rpc('delete_entry', data_product_id)
def execute_data_product_lifecycle(self, data_product_id="", lifecycle_event=""):
"""
declare a data_product to be in a given state
@param data_product_id the resource id
"""
return self.RR2.advance_lcs(data_product_id, lifecycle_event)
def get_data_product_group_list(self, org_id=''):
group_names = set()
res_ids, keys = self.clients.resource_registry.find_resources_ext(RT.DataProduct, attr_name="ooi_product_name", id_only=True)
for key in keys:
group_name = key.get('attr_value', None)
if group_name:
group_names.add(group_name)
return sorted(list(group_names))
def _get_dataset_id(self, data_product_id=''):
# find datasets for the data product
dataset_id = ''
dataset_ids, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasDataset, RT.Dataset, id_only=True)
if dataset_ids:
dataset_id = dataset_ids[0]
else:
raise NotFound('No Dataset is associated with DataProduct %s' % data_product_id)
return dataset_id
############################
#
# EXTENDED RESOURCES
#
############################
def get_data_product_extension(self, data_product_id='', ext_associations=None, ext_exclude=None, user_id=''):
#Returns an DataProductExtension object containing additional related information
if not data_product_id:
raise BadRequest("The data_product_id parameter is empty")
extended_resource_handler = ExtendedResourceContainer(self)
extended_product = extended_resource_handler.create_extended_resource_container(
extended_resource_type=OT.DataProductExtension,
resource_id=data_product_id,
computed_resource_type=OT.DataProductComputedAttributes,
ext_associations=ext_associations,
ext_exclude=ext_exclude,
user_id=user_id)
# Set data product source device (WARNING: may not be unique)
extended_product.source_device = None
dp_source, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasSource, id_only=False)
for dps in dp_source:
if isinstance(dps, Device):
if extended_product.source_device == None:
extended_product.source_device = dps
else:
log.warn("DataProduct %s has additional source device: %s", data_product_id, dps._id)
#extract the list of upstream data products from the provenance results
# dp_list = []
# for key, value in extended_product.computed.provenance.value.iteritems():
# for producer_id, dataprodlist in value['inputs'].iteritems():
# for dataprod in dataprodlist:
# dp_list.append( self.clients.resource_registry.read(dataprod) )
# extended_product.provenance_product_list = list ( set(dp_list) ) #remove dups in list
#set the data_ingestion_datetime from get_data_datetime
if extended_product.computed.data_datetime.status == ComputedValueAvailability.PROVIDED :
extended_product.data_ingestion_datetime = extended_product.computed.data_datetime.value[1]
#get the dataset size in MB
extended_product.computed.product_download_size_estimated = self._get_product_dataset_size(data_product_id)
        # convert to bytes for stored_data_size attribute
extended_product.computed.stored_data_size.value = int(extended_product.computed.product_download_size_estimated.value * 1048576)
extended_product.computed.stored_data_size.status = extended_product.computed.product_download_size_estimated.status
extended_product.computed.stored_data_size.reason = extended_product.computed.product_download_size_estimated.reason
# divide up the active and past user subscriptions
active = []
nonactive = []
for notification_obj in extended_product.computed.active_user_subscriptions.value:
if notification_obj.lcstate == LCS.RETIRED:
nonactive.append(notification_obj)
else:
active.append(notification_obj)
extended_product.computed.active_user_subscriptions.value = active
extended_product.computed.past_user_subscriptions.value = nonactive
extended_product.computed.past_user_subscriptions.status = ComputedValueAvailability.PROVIDED
extended_product.computed.number_active_subscriptions.value = len(active)
extended_product.computed.number_active_subscriptions.status = ComputedValueAvailability.PROVIDED
# replace list of lists with single list
replacement_data_products = []
for inner_list in extended_product.process_input_data_products:
if inner_list:
for actual_data_product in inner_list:
if actual_data_product:
replacement_data_products.append(actual_data_product)
extended_product.process_input_data_products = replacement_data_products
from ion.util.extresource import strip_resource_extension, get_matchers, matcher_ParameterContext
matchers = get_matchers([matcher_ParameterContext])
strip_resource_extension(extended_product, matchers=matchers)
return extended_product
def get_data_datetime(self, data_product_id=''):
# Returns a temporal bounds object of the span of data product life span (may exist without getting a granule)
ret = IonObject(OT.ComputedListValue)
ret.value = []
ret.status = ComputedValueAvailability.NOTAVAILABLE
try:
dataset_id = self._get_dataset_id(data_product_id)
bounds = self.clients.dataset_management.dataset_bounds(dataset_id)
if 'time' in bounds and len(bounds['time']) == 2 :
log.debug("get_data_datetime bounds['time']: %s" % str(dataset_id))
timeStart = IonTime(bounds['time'][0] - IonTime.JAN_1970)
timeEnd = IonTime(bounds['time'][1] - IonTime.JAN_1970)
ret.value = [str(timeStart), str(timeEnd)]
ret.status = ComputedValueAvailability.PROVIDED
except NotFound:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "Dataset for this Data Product could not be located"
except Exception as e:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "Could not calculate time range for this data product"
return ret
def _get_product_dataset_size(self, data_product_id=''):
# Returns the size of the full data product if downloaded/presented in a given presentation form
ret = IonObject(OT.ComputedFloatValue)
ret.value = 0
try:
dataset_id = self._get_dataset_id(data_product_id)
size_in_bytes = self.clients.dataset_management.dataset_size(dataset_id, in_bytes=False)
ret.status = ComputedValueAvailability.PROVIDED
ret.value = size_in_bytes
except NotFound:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "Dataset for this Data Product could not be located"
except Exception as e:
raise e
return ret
def get_data_contents_updated(self, data_product_id=''):
# the datetime when the contents of the data were last modified in any way.
# This is distinct from modifications to the data product attributes
ret = IonObject(OT.ComputedStringValue)
ret.value = ""
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "Currently need to retrieve form the coverage"
return ret
def get_parameters(self, data_product_id=''):
# The set of Parameter objects describing each variable in this data product
ret = IonObject(OT.ComputedListValue)
ret.value = []
try:
stream_def_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStreamDefinition, id_only=True)
if not stream_def_ids:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "There is no StreamDefinition associated with this DataProduct"
return ret
stream_def = self.clients.pubsub_management.read_stream_definition(stream_definition_id=stream_def_ids[0])
param_dict_ids, _ = self.clients.resource_registry.find_objects(subject=stream_def_ids[0], predicate=PRED.hasParameterDictionary, id_only=True)
if not param_dict_ids:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "There is no ParameterDictionary associated with this DataProduct"
else:
ret.status = ComputedValueAvailability.PROVIDED
if stream_def.available_fields:
retval = [i for i in self.clients.dataset_management.read_parameter_contexts(param_dict_ids[0]) if i.name in stream_def.available_fields]
else:
retval = self.clients.dataset_management.read_parameter_contexts(param_dict_ids[0])
retval = filter(lambda x : 'visible' not in x.parameter_context or x.parameter_context['visible'], retval)
ret.value = retval
except NotFound:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "FIXME: this message should say why the calculation couldn't be done"
except Exception as e:
raise e
return ret
def get_data_url(self, data_product_id=''):
# The unique pointer to this set of data
ret = IonObject(OT.ComputedStringValue)
ret.value = ""
erddap_host = CFG.get_safe('server.erddap.host','localhost')
errdap_port = CFG.get_safe('server.erddap.port','8080')
try:
data_product = self.container.resource_registry.read(data_product_id)
if data_product.category == DataProductTypeEnum.EXTERNAL:
if len(data_product.reference_urls) == 1:
ret.value = data_product.reference_urls[0]
ret.status = ComputedValueAvailability.PROVIDED
log.debug("get_data_url: data_url: %s", ret.value)
else:
ret.value = string.join( ["http://", erddap_host, ":", str(errdap_port),"/erddap/tabledap/", "data", str(data_product_id), ".html"],'')
ret.status = ComputedValueAvailability.PROVIDED
log.debug("get_data_url: data_url: %s", ret.value)
except NotFound:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "Dataset for this Data Product could not be located"
return ret
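    # Illustrative note (not from the original source): with the defaults above,
    # erddap_host='localhost', port 8080 and a hypothetical product id 'abc123',
    # the joined string is
    #   "http://localhost:8080/erddap/tabledap/dataabc123.html"
    # since the "data" segment and the id are concatenated with no separator.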
def get_provenance(self, data_product_id=''):
# Provides an audit trail for modifications to the original data
ret = IonObject(OT.ComputedDictValue)
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "Provenance not currently used."
# try:
# ret.value = self.get_data_product_provenance(data_product_id)
# ret.status = ComputedValueAvailability.PROVIDED
# except NotFound:
# ret.status = ComputedValueAvailability.NOTAVAILABLE
# ret.reason = "Error in DataProuctMgmtService:get_data_product_provenance"
# except Exception as e:
# raise e
return ret
def get_active_user_subscriptions(self, data_product_id=''):
# The UserSubscription objects for this data product
ret = IonObject(OT.ComputedListValue)
ret.value = []
try:
ret.value = self.clients.user_notification.get_subscriptions(resource_id=data_product_id, include_nonactive=True)
ret.status = ComputedValueAvailability.PROVIDED
except NotFound:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "Product subscription infromation not provided by UserNotificationService"
except Exception as e:
raise e
return ret
def get_last_granule(self, data_product_id=''):
        # Returns the most recent data values (latest granule) recorded for this data product
ret = IonObject(OT.ComputedDictValue)
ret.value = {}
try:
dataset_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasDataset, id_only=True)
if not dataset_ids:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "No dataset associated with this data product"
return ret
stream_def_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStreamDefinition, id_only=True)
if not stream_def_ids:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "No stream definition associated with this data product"
return ret
#stream_def_id = stream_def_ids[0]
#replay_granule = self.clients.data_retriever.retrieve_last_data_points(dataset_ids[0], number_of_points=1, delivery_format=stream_def_id)
#replay_granule = self.clients.data_retriever.retrieve_last_granule(dataset_ids[0])
rdt = ParameterHelper.rdt_for_data_product(data_product_id)
values = self.clients.dataset_management.dataset_latest(dataset_ids[0])
for k,v in values.iteritems():
if k in rdt:
rdt[k] = [v]
retval = {}
for k,v in rdt.iteritems():
if hasattr(rdt.context(k),'visible') and not rdt.context(k).visible:
continue
if k.endswith('_qc') and not k.endswith('glblrng_qc'):
continue
element = np.atleast_1d(rdt[k]).flatten()[0]
if element == rdt._pdict.get_context(k).fill_value:
retval[k] = '%s: Empty' % k
elif rdt._pdict.get_context(k).uom and 'seconds' in rdt._pdict.get_context(k).uom:
units = rdt._pdict.get_context(k).uom
element = np.atleast_1d(rdt[k]).flatten()[0]
unix_ts = TimeUtils.units_to_ts(units, element)
dtg = datetime.utcfromtimestamp(unix_ts)
try:
retval[k] = '%s: %s' %(k,dtg.strftime('%Y-%m-%dT%H:%M:%SZ'))
except:
retval[k] = '%s: %s' %(k, element)
elif isinstance(element, float) or (isinstance(element,np.number) and element.dtype.char in 'edfg'):
try:
precision = int(rdt.context(k).precision)
except ValueError:
precision = 5
except TypeError: # None results in a type error
precision = 5
formatted = ("{0:.%df}" % precision).format(round(element,precision))
retval[k] = '%s: %s' %(k, formatted)
else:
retval[k] = '%s: %s' % (k,element)
ret.value = retval
# ret.value = {k : str(rdt[k].tolist()[0]) for k,v in rdt.iteritems()}
ret.status = ComputedValueAvailability.PROVIDED
except NotFound:
ret.status = ComputedValueAvailability.NOTAVAILABLE
ret.reason = "FIXME: this message should say why the calculation couldn't be done"
except Exception as e:
raise e
return ret
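    # Illustrative note (not from the original source): in the formatting block
    # above, a hypothetical float value 3.14159265 whose parameter context declares
    # precision=3 is rendered as "name: 3.142"; values equal to the fill value are
    # rendered as "name: Empty", and 'seconds since ...' values as UTC strings of
    # the form "name: 2013-05-07T10:04:10Z".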
def get_recent_granules(self, data_product_id=''):
# Provides information for users who have in the past acquired this data product, but for which that acquisition was terminated
ret = IonObject(OT.ComputedDictValue)
ret.value = {}
ret.status = ComputedValueAvailability.NOTAVAILABLE
# try:
# dataset_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasDataset, id_only=True)
# if not dataset_ids:
# ret.status = ComputedValueAvailability.NOTAVAILABLE
# ret.reason = "No dataset associated with this data product"
# else:
# replay_granule = self.clients.data_retriever.retrieve_last_data_points(dataset_ids[0])
# rdt = RecordDictionaryTool.load_from_granule(replay_granule)
# ret.value = {k : rdt[k].tolist() for k,v in rdt.iteritems()}
# ret.status = ComputedValueAvailability.PROVIDED
# except NotFound:
# ret.status = ComputedValueAvailability.NOTAVAILABLE
# ret.reason = "FIXME: this message should say why the calculation couldn't be done"
# except Exception as e:
# raise e
return ret
def get_is_persisted(self, data_product_id=''):
# Returns True if data product is currently being persisted
ret = IonObject(OT.ComputedIntValue)
ret.value = self.is_persisted(data_product_id)
ret.status = ComputedValueAvailability.PROVIDED
return ret
def _has_lookup_values(self, data_product_id):
stream_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasStream, id_only=True)
if not stream_ids:
raise BadRequest('No streams found for this data product')
stream_def_ids, _ = self.clients.resource_registry.find_objects(subject=stream_ids[0], predicate=PRED.hasStreamDefinition, id_only=True)
if not stream_def_ids:
raise BadRequest('No stream definitions found for this stream')
return self.clients.pubsub_management.has_lookup_values(stream_definition_id=stream_def_ids[0])
def _get_lookup_documents(self, data_product_id):
return self.clients.data_acquisition_management.list_qc_references(data_product_id)
def _format_ion_time(self, ion_time=''):
ion_time_obj = IonTime.from_string(ion_time)
#todo: fix this and return str( ion_time_obj)
return str(ion_time_obj)
############################
#
# PREPARE UPDATE RESOURCES
#
############################
def prepare_data_product_support(self, data_product_id=''):
"""
Returns the object containing the data to update an instrument device resource
"""
#TODO - does this have to be filtered by Org ( is an Org parameter needed )
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(data_product_id, OT.DataProductPrepareSupport)
#Fill out service request information for creating a platform device
extended_resource_handler.set_service_requests(resource_data.create_request, 'data_product_management',
'create_data_product_', { "data_product": "$(data_product)" })
#Fill out service request information for creating a platform device
extended_resource_handler.set_service_requests(resource_data.update_request, 'data_product_management',
'update_data_product', { "data_product": "$(data_product)" })
#Fill out service request information for activating a platform device
extended_resource_handler.set_service_requests(resource_data.activate_request, 'data_product_management',
'activate_data_product_persistence', { "data_product": "$(data_product)" })
#Fill out service request information for deactivating a platform device
extended_resource_handler.set_service_requests(resource_data.deactivate_request, 'data_product_management',
'suspend_data_product_persistence', { "data_product": "$(data_product)" })
#Fill out service request information for assigning a stream definition
extended_resource_handler.set_service_requests(resource_data.associations['StreamDefinition'].assign_request, 'data_product_management',
'assign_stream_definition_to_data_product', { "data_product_id": data_product_id,
"stream_definition_id": "$(stream_definition_id)",
"exchange_point": "$(exchange_point)" })
#Fill out service request information for assigning a dataset
extended_resource_handler.set_service_requests(resource_data.associations['Dataset'].assign_request, 'data_product_management',
'assign_dataset_to_data_product', { "data_product_id": data_product_id,
"dataset_id": "$(dataset_id)" })
#Fill out service request information for assigning an instrument
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDeviceHasOutputProduct'].assign_request, 'data_acquisition_management',
'assign_data_product', {"data_product_id": data_product_id,
"input_resource_id": "$(instrument_device_id)"})
#Fill out service request information for unassigning an instrument
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDeviceHasOutputProduct'].unassign_request, 'data_acquisition_management',
'unassign_data_product', {"data_product_id": data_product_id,
"input_resource_id": "$(instrument_device_id)" })
#Fill out service request information for assigning an instrument
extended_resource_handler.set_service_requests(resource_data.associations['PlatformDevice'].assign_request, 'data_acquisition_management',
'assign_data_product', {"data_product_id": data_product_id,
"input_resource_id": "$(platform_device_id)"})
#Fill out service request information for unassigning an instrument
extended_resource_handler.set_service_requests(resource_data.associations['PlatformDevice'].unassign_request, 'data_acquisition_management',
'unassign_data_product', {"data_product_id": data_product_id,
"input_resource_id": "$(platform_device_id)" })
# DataProduct hasSource InstrumentDevice*
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDeviceHasSource'].assign_request,
'data_acquisition_management',
'assign_data_product_source',
{'data_product_id': data_product_id,
'source_id': '$(data_product_id)'}) # yes this is odd, but its the variable name we want to substitute based on resource_identifier (i'm not sure where that is set)
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDeviceHasSource'].unassign_request,
'data_acquisition_management',
'unassign_data_product_source',
{'data_product_id': data_product_id,
'source_id': '$(data_product_id)'})
resource_data.associations['InstrumentDeviceHasSource'].multiple_associations = True
return resource_data
def check_dpms_policy(self, process, message, headers):
try:
gov_values = GovernanceHeaderValues(headers=headers, process=process, resource_id_required=False)
except Inconsistent, ex:
return False, ex.message
resource_id = message.data_product_id
# Allow actor to suspend/activate persistence in an org where the actor has the appropriate role
orgs,_ = self.clients.resource_registry.find_subjects(subject_type=RT.Org, predicate=PRED.hasResource, object=resource_id, id_only=False)
for org in orgs:
if (has_org_role(gov_values.actor_roles, org.org_governance_name, [INSTRUMENT_OPERATOR, DATA_OPERATOR, ORG_MANAGER_ROLE, OBSERVATORY_OPERATOR])):
log.error("returning true: "+str(gov_values.actor_roles))
return True, ''
log.error("returning false: "+str(gov_values.actor_roles))
return False, '%s(%s) has been denied since the user does not have an appropriate role in any org to which the data product id %s belongs ' % (process.name, gov_values.op, resource_id)
| bsd-2-clause |
ergosimulation/mpslib | scikit-mps/examples/mpslib_2d_to_coarsen.py | 1 | 2277 | # mpslib_2d_to_coarsen: Effect of coarsening the training image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import mpslib as mps
from numpy import squeeze
import copy
# %matplotlib widget  # IPython/Jupyter magic; only valid in a notebook cell, not when run as a plain script
plt.ion()
di=6 # Use every di'th data
# NO coarsening --> 1 2D TI
TI1, TI_filename1 = mps.trainingimages.strebelle(di, coarse3d=0)
# Coarsening --> multiple 2D TI
TI2, TI_filename2 = mps.trainingimages.strebelle(di, coarse3d=1)
mps.eas.write_mat(TI1,'ti1.dat')
mps.eas.write_mat(TI2,'ti2.dat')
#%% Plot the training images
fig = plt.figure(figsize=(15, 15))
outer = gridspec.GridSpec(2, 2, wspace=0.2, hspace=0.2)
ax1 = plt.Subplot(fig, outer[0])
fig.add_subplot(ax1)
plt.imshow(np.transpose(TI1[:,:,0]))
plt.title('One coarse TI')
ax1 = plt.Subplot(fig, outer[1])
fig.add_subplot(ax1)
plt.axis('off')
plt.title('Multiple coarse TI')
nsp = int(np.floor(np.sqrt(TI2.shape[2])))
for i in [1]:
inner = gridspec.GridSpecFromSubplotSpec(nsp, nsp,
subplot_spec=outer[i], wspace=0.02, hspace=0.02)
for j in range(nsp*nsp):
ax = plt.Subplot(fig, inner[j])
fig.add_subplot(ax)
plt.imshow(np.transpose(TI2[:, :, j]))
plt.axis('off')
#%% RUN THE SIMULATIONS
alg='mps_snesim_tree'
#alg='mps_genesim'
nc=8*8
rseed=1;
#nc=8*8
#Coarsen channel TI, using only coarsened TI
# Single coarsened TI
O1=mps.mpslib(method=alg)
O1.par['debug_level']=-1
O1.par['ti_fnam']='ti1.dat'
O1.par['simulation_grid_size'][0]=85
O1.par['simulation_grid_size'][1]=45
O1.ti = TI1
O1.par['simulation_grid_size'][2]=1
O1.par['shuffle_simulation_grid']=0
O1.par['n_cond']=nc
O1.par['rseed']=rseed
O1.par['n_real']=1
O1.parameter_filename='sim1.par'
O1.delete_local_files()
O1.run()
# Detailed Coarsened TI
O2=copy.deepcopy(O1);
O2.ti = TI2
O2.parameter_filename='sim2.par'
O2.delete_local_files()
O2.run()
#%%
ax3 = plt.Subplot(fig, outer[2])
fig.add_subplot(ax3)
plt.imshow(np.transpose(squeeze(O1.sim[0])))
plt.title('Real using Single coarse TI')
ax4 = plt.Subplot(fig, outer[3])
fig.add_subplot(ax4)
plt.imshow(np.transpose(squeeze(O2.sim[0])))
plt.title('Real using Multiple coarse TI')
plt.suptitle("%s - di=%d" % (alg,di))
plt.savefig('mpslib_coarsen_%d_%s.png'%(di,alg))
plt.show()
| lgpl-3.0 |
bamford/gzhubble | python/creating_debiased_catalog/STEP_4_hubble_catalog.py | 2 | 8825 |
# coding: utf-8
# In[1]:
#get_ipython().magic(u'matplotlib inline')
@profile
def run_all():
print("Cell 1 - importing packages")
from astropy.io import fits as pyfits
from astropy.table import Table
import numpy as np
from astropy.io.fits import Column
from datetime import datetime
import matplotlib.pyplot as plt
import requests
# In[2]:
print("Cell 2 - define download_from_dropbox module")
def download_from_dropbox(url):
local_filename = "{:}".format(url.split("/")[-1].split("?")[0])
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return local_filename
# In[3]:
print("Cell 3 - download and set zeta parameters")
# zeta parameters
zeta_fname = download_from_dropbox("https://www.dropbox.com/s/ax8sclh0r34oxrq/zeta_parameters.fits?dl=1")
zeta_params=Table.read(zeta_fname)
slope = zeta_params['slope'][0]
intercept=zeta_params['intercept'][0]
# In[4]:
print("Cell 4 - download data into an astropy Table")
hubble_fname = download_from_dropbox("https://www.dropbox.com/s/bfoa17lz23976je/input_for_hubble_debiased_catalog.fits?dl=1")
votes_data=Table.read(hubble_fname)
# In[5]:
print("Cell 5 - find unique OBJNOs")
subjects=set(votes_data['OBJNO'])
# In[6]:
print("Cell 6 - define fitting functions")
#Zeta-hat function for computing debiased values
z0 = 0.3 # we're correcting to redshift 0.3
def fhat_mel(f,z,z0,zeta_hat):
val = 1. - (1. - f)*np.exp(-(z - z0)/ zeta_hat)
return val
#assume zeta_hat is a linear function of surface brightness
def zeta_hat_lin(SB):
val = 10.**((SB*slope) + intercept)
return val
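    # Worked example with made-up numbers (not from the original notebook): for an
    # observed weighted fraction f = 0.4 at z = 0.8 with zeta_hat = 0.5, debiasing
    # to z0 = 0.3 gives 1 - (1 - 0.4)*exp(-(0.8 - 0.3)/0.5) = 1 - 0.6*exp(-1) ~ 0.78,
    # i.e. the featured-vote fraction is boosted for galaxies observed beyond z0.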
# In[7]:
print("Cell 7 - create empty table")
#And here we define our new table!
intcolumn = np.zeros(len(subjects),dtype=int)
floatcolumn = np.zeros(len(subjects),dtype=float)
strcolumn = np.array([' ']*len(subjects),dtype='S24')
#S24=24 character string
#c01 = Column(name='num_classifications', format='J', array=floatcolumn) # c05 = c01, by definition
# In[9]:
print("Cell 9 - remove columns not in old table")
#First copy the raw and weighted vote information to new table.
#remove columns that are not in old table - those will be transferred manually
from astropy.table import Column as TableColumn
ex1 = TableColumn(floatcolumn,name='t01_smooth_or_features_a01_smooth_debiased_fraction', format='D')
ex2 = TableColumn(floatcolumn,name='t01_smooth_or_features_a01_smooth_lower_limit', format='D')
ex3 = TableColumn(floatcolumn,name='t01_smooth_or_features_a01_smooth_upper_limit', format='D')
ex4 = TableColumn(floatcolumn,name='t01_smooth_or_features_a01_smooth_best_fraction',format='D')
ex5 = TableColumn(floatcolumn,name='t01_smooth_or_features_a02_features_or_disk_debiased_fraction', format='D')
ex6 = TableColumn(floatcolumn,name='t01_smooth_or_features_a02_features_or_disk_best_fraction',format='D')
newtable = votes_data.copy(copy_data=True)
newtable.add_columns( (ex1, ex2, ex3, ex4, ex5, ex6) )
# In[10]:
print("Cell 10 - enter data")
z = 'Z_BEST'
    print('Writing file...')
# Why bother to do this row by row?
for i,gal in enumerate(votes_data):
if i % 1000 == 0:
t=datetime.now().time().isoformat()
            print('Writing %ith row at time %s' % (i, t))
#transfer all raw/weighted data
#for column_name in old_columns:
# hubble_catalog.data.field(column_name)[i] = gal[column_name]
#new data: debiased vote fractions
p_features_debiased = fhat_mel(gal['t01_smooth_or_features_a02_features_or_disk_weighted_fraction'],gal[z],z0,zeta_hat_lin(gal['MU_HI']))
newtable.field('t01_smooth_or_features_a02_features_or_disk_debiased_fraction')[i] = p_features_debiased
#write the 'best features' fraction column
if newtable.field('Correctable_Category')[i]=='correctable':
p_features_best = newtable.field('t01_smooth_or_features_a02_features_or_disk_debiased_fraction')[i]
elif newtable.field('Correctable_Category')[i]=='uncorrectable':
p_features_best = max(newtable.field('t01_smooth_or_features_a02_features_or_disk_lower_limit')[i],newtable.field('t01_smooth_or_features_a02_features_or_disk_weighted_fraction')[i])
else:
p_features_best = newtable.field('t01_smooth_or_features_a02_features_or_disk_weighted_fraction')[i]
newtable.field('t01_smooth_or_features_a02_features_or_disk_best_fraction')[i] = p_features_best
#debiased, lower and upper, best smooth fractions based on 1 - p_artifact - p_features
newtable.field('t01_smooth_or_features_a01_smooth_debiased_fraction')[i] = 1 - gal['t01_smooth_or_features_a03_star_or_artifact_weighted_fraction'] - p_features_debiased
newtable.field('t01_smooth_or_features_a01_smooth_lower_limit')[i] = 1 - gal['t01_smooth_or_features_a03_star_or_artifact_weighted_fraction'] - gal['t01_smooth_or_features_a02_features_or_disk_upper_limit']
newtable.field('t01_smooth_or_features_a01_smooth_upper_limit')[i] = 1 - gal['t01_smooth_or_features_a03_star_or_artifact_weighted_fraction'] - gal['t01_smooth_or_features_a02_features_or_disk_lower_limit']
newtable.field('t01_smooth_or_features_a01_smooth_best_fraction')[i] = 1 - gal['t01_smooth_or_features_a03_star_or_artifact_weighted_fraction'] - p_features_best
# In[11]:
print("Cell 11 - write to file")
#write to file
newtable.write('gz_hubble_catalog_%i_%i_%i.fits'%(datetime.now().month,datetime.now().day,datetime.now().year),format='fits',overwrite=True)
# In[12]:
print("Cell 12 - set categories for plots")
correctable=(votes_data['Correctable_Category']=='correctable ')
uncorrectable=(votes_data['Correctable_Category']=='uncorrectable ')
nei=(votes_data['Correctable_Category']=='nei ')
old_votes=votes_data['t01_smooth_or_features_a02_features_or_disk_weighted_fraction']
old_lower_limit=votes_data['t01_smooth_or_features_a02_features_or_disk_lower_limit']
old_z=votes_data[z]
old_mu=votes_data['MU_HI']
new_c=fhat_mel(old_votes[correctable],old_z[correctable],z0,zeta_hat_lin(old_mu[correctable]))
new_unc=fhat_mel(old_votes[uncorrectable],old_z[uncorrectable],z0,zeta_hat_lin(old_mu[uncorrectable])) #debiased value
#new_unc = old_lower_limit[uncorrectable] #lower limit
new_nei=fhat_mel(old_votes[nei],old_z[nei],z0,zeta_hat_lin(old_mu[nei]))
# In[13]:
print("Cell 13 - plot results")
# 2D histogram of new p_features distribution
fig = plt.figure(figsize=(20,5))
ax1 = fig.add_subplot(131)
hex1 = ax1.hexbin(old_votes[correctable],new_c, cmap=plt.cm.YlOrRd_r,gridsize=50,vmin =0,vmax=100)
ax1.set_xlabel(r'$f_{features}$',fontsize=20)
ax1.set_ylabel(r'$\hat f_{features}$',fontsize=20)
cb1 = plt.colorbar(hex1)
ax1.set_title('correctable data, linear $\hat \zeta$',fontsize=20)
# Add the one-to-one line for comparision.
# Upper left = boosted p_features for z > 0.3
# Lower right = depressed p_features for z < 0.3
ax1.plot([0,1],[0,1],color='k',lw=2,ls='--')
# Try it with Mel's new function
ax2 = fig.add_subplot(132)
hex2 = ax2.hexbin(old_votes[nei],new_nei, cmap=plt.cm.YlOrRd_r,gridsize=50,vmax=100)
ax2.set_xlabel(r'$f_{features}$',fontsize=20)
ax2.set_ylabel(r'$\hat f_{features}$',fontsize=20)
ax2.set_xlim(0,1)
ax2.set_ylim(0,1)
cb2 = plt.colorbar(hex2)
ax2.plot([0,1],[0,1],color='k',lw=2,ls='--')
ax2.set_title('NEI data, linear $\hat \zeta$',fontsize=20)
ax3 = fig.add_subplot(133)
hex3 = ax3.hexbin(old_votes[uncorrectable],new_unc, cmap=plt.cm.YlOrRd_r,gridsize=50,vmin =0, vmax=100)
ax3.set_xlabel(r'$f_{features}$',fontsize=20)
ax3.set_ylabel(r'$\hat f_{features}$',fontsize=20)
ax3.set_xlim(0,1)
ax3.set_ylim(0,1)
cb3 = plt.colorbar(hex3)
ax3.plot([0,1],[0,1],color='k',lw=2,ls='--')
ax3.set_title('uncorrectable data, linear $\hat \zeta$',fontsize=20)
ax3.set_axis_bgcolor('#800000')
return None
if __name__ == "__main__":
run_all()
| mit |
ptitjano/bokeh | examples/webgl/clustering.py | 7 | 2155 | """ Example inspired by an example from the scikit-learn project:
http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html
"""
import numpy as np
try:
from sklearn import cluster, datasets
from sklearn.preprocessing import StandardScaler
except ImportError:
    raise ImportError('This example requires scikit-learn (conda install scikit-learn)')
from bokeh.layouts import row, column
from bokeh.plotting import figure, show, output_file
N = 50000
PLOT_SIZE = 400
# generate datasets.
np.random.seed(0)
noisy_circles = datasets.make_circles(n_samples=N, factor=.5, noise=.04)
noisy_moons = datasets.make_moons(n_samples=N, noise=.05)
centers = [(-2, 3), (2, 3), (-2, -3), (2, -3)]
blobs1 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.4, random_state=8)
blobs2 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.7, random_state=8)
colors = np.array([x for x in ('#00f', '#0f0', '#f00', '#0ff', '#f0f', '#ff0')])
colors = np.hstack([colors] * 20)
# create clustering algorithms
dbscan = cluster.DBSCAN(eps=.2)
birch = cluster.Birch(n_clusters=2)
means = cluster.MiniBatchKMeans(n_clusters=2)
spectral = cluster.SpectralClustering(n_clusters=2, eigen_solver='arpack', affinity="nearest_neighbors")
affinity = cluster.AffinityPropagation(damping=.9, preference=-200)
# change here, to select clustering algorithm (note: spectral is slow)
algorithm = dbscan # <- SELECT ALG
plots =[]
for dataset in (noisy_circles, noisy_moons, blobs1, blobs2):
X, y = dataset
X = StandardScaler().fit_transform(X)
# predict cluster memberships
algorithm.fit(X)
if hasattr(algorithm, 'labels_'):
        y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
p = figure(webgl=True, title=algorithm.__class__.__name__,
plot_width=PLOT_SIZE, plot_height=PLOT_SIZE)
p.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), alpha=0.1,)
plots.append(p)
# generate layout for the plots
layout = column(row(plots[:2]), row(plots[2:]))
output_file("clustering.html", title="clustering with sklearn")
show(layout)
| bsd-3-clause |
AndreaDellera/Tesi | reti/plot.py | 1 | 1454 | __author__ = 'Andrea'
import matplotlib.pyplot as mpl
def main():
fold = "./FF/train45"
in_train = open(fold + "/errors/train_MSE.txt", "r")
in_test = open(fold + "/errors/test_MSE.txt", "r")
# in_valid = open(fold + "/errors/valid_MSE.txt", "r")
test_errors = in_test.readlines()
# test_valid = in_valid.readlines()
train_errors = in_train.readlines()
# plot train and test errors
one, = mpl.plot(range(len(train_errors)), train_errors, label = 'allineamento')
two, = mpl.plot(range(len(test_errors)), test_errors, label = 'validazione', linestyle='--')
# three, = mpl.plot(range(len(test_valid)), test_valid)
mpl.axis([0, 9, 0, 4])
mpl.legend(handles=[one, two])
mpl.ylabel('Errore')
mpl.xlabel('Tempo')
mpl.show()
ptrain = open(fold + "/errors/train_progression.txt", "r")
ptest = open(fold + "/errors/test_progression.txt", "r")
train_errors = ptrain.readlines()
test_errors = ptest.readlines()
mpl.close()
one, = mpl.plot(range(len(train_errors)), train_errors, label = 'allineamento')
two, = mpl.plot(range(len(test_errors)), test_errors, label = 'validazione', linestyle='--')
mpl.axis([0, 10000, 0, 4])
mpl.legend(handles=[one, two])
mpl.ylabel('Errore')
mpl.xlabel('Tempo')
mpl.show()
# in_valid.close()
in_test.close()
in_train.close()
ptrain.close()
ptest.close()
if __name__ == "__main__":
main()
| apache-2.0 |
nhuntwalker/astroML | book_figures/chapter9/fig_discriminant_function.py | 3 | 1601 | """
Example of a Discriminant Function
----------------------------------
This plot shows a simple example of a discriminant function between
two sets of points
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# create some toy data
np.random.seed(0)
cluster_1 = np.random.normal([1, 0.5], 0.5, size=(10, 2))
cluster_2 = np.random.normal([-1, -0.5], 0.5, size=(10, 2))
#------------------------------------------------------------
# plot the data and boundary
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111, xticks=[], yticks=[])
ax.scatter(cluster_1[:, 0], cluster_1[:, 1], c='k', s=30)
ax.scatter(cluster_2[:, 0], cluster_2[:, 1], c='w', s=30)
ax.plot([0, 1], [1.5, -1.5], '-k', lw=2)
ax.set_xlim(-2, 2.5)
ax.set_ylim(-2, 2)
plt.show()
| bsd-2-clause |
miaecle/deepchem | deepchem/hyper/gaussian_process.py | 1 | 12659 | """
Contains class for gaussian process hyperparameter optimizations.
"""
import os
import logging
import tempfile
from typing import Dict, List, Optional, Tuple, Union
from deepchem.data import Dataset
from deepchem.metrics import Metric
from deepchem.hyper.base_classes import HyperparamOpt
from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename
logger = logging.getLogger(__name__)
PARAM_DICT = Dict[str, Union[int, float]]
def compute_parameter_range(params_dict: PARAM_DICT,
search_range: Union[int, float, PARAM_DICT]
) -> Dict[str, Tuple[str, List[float]]]:
"""Convenience Function to compute parameter search space.
Parameters
----------
params_dict: Dict
Dictionary mapping strings to Ints/Floats. An explicit list of
parameters is computed with `search_range`. The optimization range
computed is specified in the documentation for `search_range`
below.
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
Returns
-------
param_range: Dict
Dictionary mapping hyperparameter names to tuples. Each tuple is
of form `(value_type, value_range)` where `value_type` is a string
that is either "int" or "cont" and `value_range` is a list of two
elements of the form `[low, hi]`. This format is expected by
pyGPGO which `GaussianProcessHyperparamOpt` uses to perform
optimization.
"""
# Range of optimization
param_range = {}
if isinstance(search_range, dict):
if sorted(params_dict.keys()) != sorted(search_range.keys()):
raise ValueError(
"If search_range is provided as a dictionary, it must have the same keys as params_dict."
)
elif (not isinstance(search_range, int)) and (not isinstance(
search_range, float)):
raise ValueError("search_range must be a dict or int or float.")
for hp, value in params_dict.items():
if isinstance(search_range, dict):
hp_search_range = search_range[hp]
else:
# We know from guard above that this is an int/float
hp_search_range = search_range
if isinstance(value, int):
value_range = [value // hp_search_range, value * hp_search_range]
param_range[hp] = ("int", value_range)
elif isinstance(value, float):
value_range = [value / hp_search_range, value * hp_search_range]
param_range[hp] = ("cont", value_range)
return param_range
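# Illustrative sketch of the mapping above (the hyperparameter names here are
# made up and not part of DeepChem):
#
#   >>> compute_parameter_range({'dropout': 0.5, 'n_layers': 2}, 2)
#   {'dropout': ('cont', [0.25, 1.0]), 'n_layers': ('int', [1, 4])}
#
# Floats get a continuous range [value / range, value * range]; ints get an
# integer range [value // range, value * range].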
class GaussianProcessHyperparamOpt(HyperparamOpt):
"""
Gaussian Process Global Optimization(GPGO)
This class uses Gaussian Process optimization to select
hyperparameters. Underneath the hood it uses pyGPGO to optimize
models. If you don't have pyGPGO installed, you won't be able to use
this class.
Note that `params_dict` has a different semantics than for
`GridHyperparamOpt`. `param_dict[hp]` must be an int/float and is
used as the center of a search range.
Example
-------
This example shows the type of constructor function expected.
>>> import sklearn
>>> import deepchem as dc
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(n_tasks=1, **p))
Here's a more sophisticated example that shows how to optimize only
some parameters of a model. In this case, we have some parameters we
want to optimize, and others which we don't. To handle this type of
search, we create a `model_builder` which hard codes some arguments
(in this case, `n_tasks` and `n_features` which are properties of a
dataset and not hyperparameters to search over.)
>>> def model_builder(**model_params):
... n_layers = model_params['layers']
... layer_width = model_params['width']
... dropout = model_params['dropout']
... return dc.models.MultitaskClassifier(
... n_tasks=5,
... n_features=100,
... layer_sizes=[layer_width]*n_layers,
... dropouts=dropout
... )
>>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(model_builder)
Note
----
This class requires pyGPGO to be installed.
"""
# NOTE: mypy prohibits changing the number of arguments
# FIXME: Signature of "hyperparam_search" incompatible with supertype "HyperparamOpt"
def hyperparam_search( # type: ignore[override]
self,
params_dict: PARAM_DICT,
train_dataset: Dataset,
valid_dataset: Dataset,
metric: Metric,
use_max: bool = True,
logdir: Optional[str] = None,
max_iter: int = 20,
search_range: Union[int, float, PARAM_DICT] = 4,
logfile: Optional[str] = None):
"""Perform hyperparameter search using a gaussian process.
Parameters
----------
params_dict: Dict
Maps hyperparameter names (strings) to possible parameter
values. The semantics of this list are different than for
`GridHyperparamOpt`. `params_dict[hp]` must map to an int/float,
which is used as the center of a search with radius
`search_range` since pyGPGO can only optimize numerical
hyperparameters.
train_dataset: Dataset
dataset used for training
valid_dataset: Dataset
dataset used for validation(optimization on valid scores)
metric: Metric
metric used for evaluation
use_max: bool, (default True)
Specifies whether to maximize or minimize `metric`.
maximization(True) or minimization(False)
logdir: str, optional, (default None)
The directory in which to store created models. If not set, will
use a temporary directory.
max_iter: int, (default 20)
number of optimization trials
search_range: int/float/Dict (default 4)
The `search_range` specifies the range of parameter values to
search for. If `search_range` is an int/float, it is used as the
global search range for parameters. This creates a search
problem on the following space:
optimization on [initial value / search_range,
initial value * search_range]
If `search_range` is a dict, it must contain the same keys as
for `params_dict`. In this case, `search_range` specifies a
per-parameter search range. This is useful in case some
parameters have a larger natural range than others. For a given
hyperparameter `hp` this would create the following search
range:
optimization on hp on [initial value[hp] / search_range[hp],
initial value[hp] * search_range[hp]]
logfile: str, optional (default None)
      Name of logfile to write results to. If specified, this must
      be a valid file. If not specified, results of hyperparameter
      search will be written to `logdir/results.txt`.
Returns
-------
Tuple[`best_model`, `best_hyperparams`, `all_scores`]
`(best_model, best_hyperparams, all_scores)` where `best_model` is
an instance of `dc.model.Model`, `best_hyperparams` is a
dictionary of parameters, and `all_scores` is a dictionary mapping
string representations of hyperparameter sets to validation
scores.
"""
try:
from pyGPGO.covfunc import matern32
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO
except ModuleNotFoundError:
raise ValueError("This class requires pyGPGO to be installed.")
# Specify logfile
log_file = None
if logfile:
log_file = logfile
elif logdir is not None:
# Make logdir if it doesn't exist.
if not os.path.exists(logdir):
os.makedirs(logdir, exist_ok=True)
log_file = os.path.join(logdir, "results.txt")
# setup range
param_range = compute_parameter_range(params_dict, search_range)
param_keys = list(param_range.keys())
# Stores all results
all_results = {}
# Store all model references so we don't have to reload
all_models = {}
# Stores all model locations
model_locations = {}
# Demarcating internal function for readability
def optimizing_function(**placeholders):
"""Private Optimizing function
Take in hyper parameter values and return valid set performances
Parameters
----------
placeholders: keyword arguments
Should be various hyperparameters as specified in `param_keys` above.
Returns:
--------
valid_scores: float
valid set performances
"""
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
# param values are always float in BO, so this line converts float to int
# see : https://github.com/josejimenezluna/pyGPGO/issues/10
hyper_parameters[hp] = int(placeholders[hp])
else:
hyper_parameters[hp] = float(placeholders[hp])
logger.info("Running hyperparameter set: %s" % str(hyper_parameters))
if log_file:
with open(log_file, 'w+') as f:
# Record hyperparameters
f.write("Parameters: %s" % str(hyper_parameters))
f.write('\n')
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
if logdir is not None:
filename = "model%s" % hp_str
model_dir = os.path.join(logdir, filename)
logger.info("model_dir is %s" % model_dir)
try:
os.makedirs(model_dir)
except OSError:
if not os.path.isdir(model_dir):
logger.info("Error creating model_dir, using tempfile directory")
model_dir = tempfile.mkdtemp()
else:
model_dir = tempfile.mkdtemp()
# Add it on to the information needed for the constructor
hyper_parameters["model_dir"] = model_dir
model = self.model_builder(**hyper_parameters)
model.fit(train_dataset)
try:
model.save()
# Some models autosave
except NotImplementedError:
pass
multitask_scores = model.evaluate(valid_dataset, [metric])
score = multitask_scores[metric.name]
if log_file:
with open(log_file, 'a') as f:
# Record performances
f.write("Score: %s" % str(score))
f.write('\n')
# Store all results
all_results[hp_str] = score
# Store reference to model
all_models[hp_str] = model
model_locations[hp_str] = model_dir
# GPGO maximize performance by default
# set performance to its negative value for minimization
if use_max:
return score
else:
return -score
# execute GPGO
cov = matern32()
gp = GaussianProcess(cov)
acq = Acquisition(mode='ExpectedImprovement')
gpgo = GPGO(gp, acq, optimizing_function, param_range)
logger.info("Max number of iteration: %i" % max_iter)
gpgo.run(max_iter=max_iter)
hp_opt, valid_performance_opt = gpgo.getResult()
hyper_parameters = {}
for hp in param_keys:
if param_range[hp][0] == "int":
hyper_parameters[hp] = int(hp_opt[hp])
else:
# FIXME: Incompatible types in assignment
hyper_parameters[hp] = float(hp_opt[hp]) # type: ignore
hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters)
# Let's fetch the model with the best parameters
best_model = all_models[hp_str]
# Compare best model to default hyperparameters
if log_file:
with open(log_file, 'a') as f:
# Record hyperparameters
f.write("params_dict:")
f.write(str(params_dict))
f.write('\n')
# Return default hyperparameters
return best_model, hyper_parameters, all_results
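# Minimal usage sketch of the optimizer above (illustrative only; the datasets,
# metric and parameter values below are hypothetical and not taken from this
# module):
#
#   metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
#   opt = GaussianProcessHyperparamOpt(model_builder)   # model_builder as in the class docstring
#   best_model, best_hp, all_scores = opt.hyperparam_search(
#       {'width': 64, 'dropout': 0.25}, train_dataset, valid_dataset, metric,
#       max_iter=10, search_range=4)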
| mit |
marenar/SoftwareSystems | lecture14/thinkplot.py | 88 | 12565 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
return cls.color_iter
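# Illustrative note (not part of the original file): PrePlot(3) below calls
# Brewer.InitializeIter(3), so the next three Plot()/Pmf()/Cdf() calls pull
# colors 0, 2 and 4 from Brewer.colors (via which_colors[3]) before the
# iterator is exhausted.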
def PrePlot(num=None, rows=1, cols=1):
"""Takes hints about what's coming.
num: number of lines that will be plotted
"""
if num:
Brewer.InitializeIter(num)
# TODO: get sharey and sharex working. probably means switching
# to subplots instead of subplot.
# also, get rid of the gray background.
if rows > 1 or cols > 1:
pyplot.subplots(rows, cols, sharey=True)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(rows, cols, plot_number):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
pyplot.subplot(rows, cols, plot_number)
class InfiniteList(list):
"""A list that returns the same value for all indices."""
def __init__(self, val):
"""Initializes the list.
val: value to be stored
"""
list.__init__(self)
self.val = val
def __getitem__(self, index):
"""Gets the item with the given index.
index: int
returns: the stored value
"""
return self.val
def Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.iteritems():
d.setdefault(key, val)
return d
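# Example of the convention used throughout this module (illustrative):
#
#   >>> Underride(dict(color='red'), color='blue', linewidth=3)
#   {'color': 'red', 'linewidth': 3}
#
# caller-supplied values win; keyword defaults only fill in missing keys.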
def Clf():
"""Clears the figure and any hints that have been set."""
Brewer.ClearIter()
pyplot.clf()
def Figure(**options):
"""Sets options for the current figure."""
Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def Plot(xs, ys, style='', **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
color_iter = Brewer.GetIter()
if color_iter:
try:
options = Underride(options, color=color_iter.next())
except StopIteration:
print 'Warning: Brewer ran out of colors.'
Brewer.ClearIter()
options = Underride(options, linewidth=3, alpha=0.8)
pyplot.plot(xs, ys, style, **options)
def Scatter(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
pyplot.scatter(xs, ys, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ps = pmf.Render()
if pmf.name:
options = Underride(options, label=pmf.name)
Plot(xs, ps, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, fs = hist.Render()
width = min(Diff(xs))
if hist.name:
options = Underride(options, label=hist.name)
options = Underride(options,
align='center',
linewidth=0,
width=width)
pyplot.bar(xs, fs, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
    Options are passed along for all Hists.  If you want different
    options for each hist, make multiple calls to Hist.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs.pop()
ps.pop()
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
xs.pop(0)
ps.pop(0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
if cdf.name:
options = Underride(options, label=cdf.name)
Plot(xs, ps, **options)
return scale
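# Illustrative use of the scale options returned above (not from the original
# file): for a heavy-tailed sample one might plot the complementary CDF on
# log-log axes and forward the returned scale options to Save or Show:
#
#   scale = Cdf(cdf, transform='pareto')
#   Save(root='ccdf', **scale)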
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.iterkeys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
    xs: sequence of x values
    ys: sequence of y values
    zs: two-dimensional array of z values
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
loc = options.get('loc', 0)
legend = options.get('legend', True)
if legend:
pyplot.legend(loc=loc)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
# TODO: figure out how to show more than one plot
Config(**options)
pyplot.show()
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
if root:
for fmt in formats:
SaveFormat(root, fmt)
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print 'Writing', filename
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = Brewer.ColorGenerator(7)
for color in color_iter:
print color
if __name__ == '__main__':
main()
| gpl-3.0 |
studywolf/control | studywolf_control/tasks/write_data/read_path.py | 1 | 3050 | import numpy as np
def get_raw_data(input_name, writebox, spaces=False):
f = open('tasks/write_data/'+input_name+'.txt', 'r')
row = f.readline()
points = []
for row in f:
points.append(row.strip('\n').split(','))
f.close()
points = np.array(points, dtype='float')
points = points[:, :2]
# need to rotate the points
theta = np.pi/2.
R = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
for ii in range(points.shape[0]):
points[ii] = np.dot(R, points[ii])
# need to mirror the x values
for ii in range(points.shape[0]):
points[ii, 0] *= -1
# center numbers
points[:, 0] -= np.min(points[:,0])
points[:, 1] -= np.min(points[:,1])
# normalize
# TODO: solve weird scaling for 1 and l, and 9
points[:, 0] /= max(points[:,0])
points[:, 1] /= max(points[:,1])
# center numbers
points[:, 0] -= .5 - (max(points[:,0]) - min(points[:, 0])) / 2.0
points[:,0] *= 5.0 / 6.0 * (writebox[1] - writebox[0])
points[:,1] *= (writebox[3] - writebox[2])
if input_name in ('1'):
points[:, 0] /= 15.
if input_name in ('s'):
points[:, 0] /= 5.
if input_name in ('9'):
points[:, 0] /= 2.
if input_name in ('e','o','w','r'):
points[:, 1] /= 2.
points[:,0] += writebox[0]
points[:,1] += writebox[2]
return points
def get_single(**kwargs):
"""Wrap the number with np.nans on either end
"""
num = get_raw_data(**kwargs)
new_array = np.zeros((num.shape[0]+2, num.shape[1]))
new_array[0] = [np.nan, np.nan]
new_array[-1] = [np.nan, np.nan]
new_array[1:-1] = num
return new_array
def get_sequence(sequence, writebox, spaces=False):
"""Returns a sequence
sequence list: the sequence of integers
writebox list: [min x, max x, min y, max y]
"""
nans = np.array([np.nan, np.nan])
nums= nans.copy()
if spaces is False:
each_num_width = (writebox[1] - writebox[0]) / float(len(sequence))
else:
each_num_width = (writebox[1] - writebox[0]) / float(len(sequence)*2 - 1)
for ii, nn in enumerate(sequence):
if spaces is False:
num_writebox = [writebox[0] + each_num_width * ii ,
writebox[0] + each_num_width * (ii+1),
writebox[2], writebox[3]]
else:
num_writebox = [writebox[0] + each_num_width * 2 * ii ,
writebox[0] + each_num_width * 2 * (ii+.5),
writebox[2], writebox[3]]
if isinstance(nn, int):
nn = str(nn)
num = get_raw_data(nn, num_writebox)
nums = np.vstack([nums, num, nans])
return nums
### Testing code ###
if __name__ == '__main__':
import matplotlib.pyplot as plt
files=['h','e','l','l','o','w','o','r','l','d']
nums = get_sequence(files, writebox=[-1,1,0,1], spaces=False)
plt.plot(nums[:,0], nums[:,1])
plt.show()
| gpl-3.0 |
turi-code/SFrame | oss_src/unity/python/sframe/test/test_sframe.py | 5 | 134915 | '''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
from ..data_structures.sframe import SFrame
from ..data_structures.sarray import SArray
from ..data_structures.image import Image
from ..connect import main as glconnect
from ..util import _assert_sframe_equal, generate_random_sframe
from .. import _launch, load_sframe, aggregate
from . import util
import pandas as pd
from ..util.timezone import GMT
from pandas.util.testing import assert_frame_equal
import unittest
import datetime as dt
import tempfile
import os
import csv
import gzip
import string
import time
import numpy as np
import array
import math
import random
import shutil
import functools
import sys
import mock
import sqlite3
from .dbapi2_mock import dbapi2_mock
HAS_PYSPARK = True
try:
from pyspark import SparkContext, SQLContext
except:
HAS_PYSPARK = False
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
# Taken from http://stackoverflow.com/questions/1151658/python-hashable-dicts
# by Alex Martelli
class hashabledict(dict):
def __key(self):
return tuple((k,self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
return self.__key() == other.__key()
class SFrameTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
self.a_to_z = [str(chr(97 + i)) for i in range(0, 26)]
self.dataframe = pd.DataFrame({'int_data': self.int_data, 'float_data': self.float_data, 'string_data': self.string_data})
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
self.int_data2 = range(50,60)
self.float_data2 = [1.0 * i for i in range(50,60)]
self.string_data2 = [str(i) for i in range(50,60)]
self.dataframe2 = pd.DataFrame({'int_data': self.int_data2, 'float_data': self.float_data2, 'string_data': self.string_data2})
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0))]
self.all_type_cols = [self.int_data,
self.float_data,
self.string_data,
self.vec_data,
self.list_data,
self.dict_data,
self.datetime_data*5]
self.sf_all_types = SFrame({"X"+str(i[0]):i[1] for i in zip(range(1,8),
self.all_type_cols)})
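# columns are named X1..X7, one per element type listed above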
# Taken from http://en.wikipedia.org/wiki/Join_(SQL) for fun.
self.employees_sf = SFrame()
self.employees_sf.add_column(SArray(['Rafferty','Jones','Heisenberg','Robinson','Smith','John']), 'last_name')
self.employees_sf.add_column(SArray([31,33,33,34,34,None]), 'dep_id')
# XXX: below are only used by one test!
self.departments_sf = SFrame()
self.departments_sf.add_column(SArray([31,33,34,35]), 'dep_id')
self.departments_sf.add_column(SArray(['Sales','Engineering','Clerical','Marketing']), 'dep_name')
def __assert_sarray_equal(self, sa1, sa2):
l1 = list(sa1)
l2 = list(sa2)
self.assertEquals(len(l1), len(l2))
for i in range(len(l1)):
v1 = l1[i]
v2 = l2[i]
if v1 == None:
self.assertEqual(v2, None)
else:
if type(v1) == dict:
self.assertEquals(len(v1), len(v2))
for key in v1:
self.assertTrue(key in v1)
self.assertEqual(v1[key], v2[key])
elif (hasattr(v1, "__iter__")):
self.assertEquals(len(v1), len(v2))
for j in range(len(v1)):
t1 = v1[j]; t2 = v2[j]
if (type(t1) == float):
if (math.isnan(t1)):
self.assertTrue(math.isnan(t2))
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(v1, v2)
def test_split_datetime(self):
from_zone = GMT(0)
to_zone = GMT(4.5)
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
central = utc.astimezone(to_zone)
sa = SArray([utc,central])
expected = SFrame()
expected ['X.year'] = [2011,2011]
expected ['X.month'] = [1,1]
expected ['X.day'] = [21,21]
expected ['X.hour'] = [2,7]
expected ['X.minute'] = [37,7]
expected ['X.second'] = [21,21]
expected ['X.tzone'] = [0.0,4.5]
result = sa.split_datetime(tzone=True)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column names
expected = SFrame()
expected ['ttt.year'] = [2011,2011]
expected ['ttt.minute'] = [37,7]
expected ['ttt.second'] = [21,21]
result = sa.split_datetime(column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sf = SFrame({'datetime': sa})
result = sf.split_datetime('datetime', column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
def __test_equal(self, sf, df):
self.assertEquals(sf.num_rows(), df.shape[0])
self.assertEquals(sf.num_cols(), df.shape[1])
assert_frame_equal(sf.to_dataframe(), df)
def __create_test_df(self, size):
int_data = []
float_data = []
string_data = []
for i in range(0,size):
int_data.append(i)
float_data.append(float(i))
string_data.append(str(i))
return pd.DataFrame({'int_data': int_data,
'float_data': float_data,
'string_data': string_data})
# Test if the rows are all the same...row order does not matter.
# (I do expect column order to be the same)
def __assert_join_results_equal(self, sf, expected_sf):
_assert_sframe_equal(sf, expected_sf, check_row_order=False)
def test_creation_from_dataframe(self):
# created from empty dataframe
sf_empty = SFrame(data=pd.DataFrame())
self.__test_equal(sf_empty, pd.DataFrame())
sf = SFrame(data=self.dataframe, format='dataframe')
self.__test_equal(sf, self.dataframe)
sf = SFrame(data=self.dataframe, format='auto')
self.__test_equal(sf, self.dataframe)
original_p = pd.DataFrame({'a':[1.0, float('nan')]})
effective_p = pd.DataFrame({'a':[1.0, None]})
sf = SFrame(data=original_p)
self.__test_equal(sf, effective_p)
original_p = pd.DataFrame({'a':['a',None,'b']})
sf = SFrame(data=original_p)
self.__test_equal(sf, original_p)
def test_auto_parse_csv(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:
df = pd.DataFrame({'float_data': self.float_data,
'int_data': self.int_data,
'string_data': self.a_to_z[:len(self.int_data)]})
df.to_csv(csvfile, index=False)
csvfile.close()
sf = SFrame.read_csv(csvfile.name, header=True)
self.assertEqual(sf.dtype(), [float, int, str])
self.__test_equal(sf, df)
def test_parse_csv(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:
self.dataframe.to_csv(csvfile, index=False)
csvfile.close()
# list type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints=[int, int, str])
self.assertEqual(sf.dtype(), [int, int, str])
sf['int_data'] = sf['int_data'].astype(int)
sf['float_data'] = sf['float_data'].astype(float)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
# list type hints, incorrect number of columns
self.assertRaises(RuntimeError,
lambda: SFrame.read_csv(csvfile.name,
column_type_hints=[int, float]))
# dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'int_data': int,
'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# partial dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# single value type hints
sf = SFrame.read_csv(csvfile.name, column_type_hints=str)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.__test_equal(sf, all_string_column_df)
# single value type hints row limit
sf = SFrame.read_csv(csvfile.name, column_type_hints=str, nrows=5)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.assertEqual(len(sf), 5)
self.__test_equal(sf, all_string_column_df[0:len(sf)])
sf = SFrame.read_csv(csvfile.name)
sf2 = SFrame(csvfile.name, format='csv')
self.__test_equal(sf2, sf.to_dataframe())
f = open(csvfile.name, "w")
f.write('a,b,c\n')
f.write('NA,PIKA,CHU\n')
f.write('1.0,2,3\n')
f.close()
sf = SFrame.read_csv(csvfile.name,
na_values=['NA','PIKA','CHU'],
column_type_hints={'a':float,'b':int,'c':str})
t = list(sf['a'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 1.0)
t = list(sf['b'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 2)
t = list(sf['c'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], "3")
def test_save_load_file_cleanup(self):
# when some file is in use, file should not be deleted
with util.TempDirectory() as f:
sf = SFrame()
sf['a'] = SArray(range(1,1000000))
sf.save(f)
# many files for each SArray, plus 1 sframe_idx, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# sf1 now references the on disk file
sf1 = SFrame(f);
# create another SFrame and save to the same location
sf2 = SFrame()
sf2['b'] = SArray([str(i) for i in range(1,100000)])
sf2['c'] = SArray(range(1, 100000))
sf2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# now sf1 should still be accessible
self.__test_equal(sf1, sf.to_dataframe())
# and sf2 is correct too
sf3 = SFrame(f)
self.__test_equal(sf3, sf2.to_dataframe())
# when sf1 goes out of scope, the tmp files should be gone
sf1 = 1
time.sleep(1) # give time for the files being deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
def test_save_load(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f)
sf2 = load_sframe(f)
self.__test_equal(sf2, self.dataframe)
# Check individual formats with the SFrame constructor
formats = ['.csv']
for suffix in formats:
f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name)
sf2 = SFrame(f.name)
sf2['int_data'] = sf2['int_data'].astype(int)
sf2['float_data'] = sf2['float_data'].astype(float)
sf2['string_data'] = sf2['string_data'].astype(str)
self.__test_equal(sf2, self.dataframe)
g=SArray([['a','b',3],[{'a':'b'}],[1,2,3]])
g2=SFrame()
g2['x']=g
g2.save(f.name)
g3=SFrame.read_csv(f.name,column_type_hints=list)
self.__test_equal(g2, g3.to_dataframe())
f.close()
os.unlink(f.name)
# Make sure this file doesn't exist before testing
self.assertRaises(IOError, lambda: SFrame(data='__no_such_file__.frame_idx', format='sframe'))
if sys.platform != 'win32':
# Bad permission
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sf.save(os.path.join(test_dir, 'bad.frame_idx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sf2 = SFrame(os.path.join(test_dir, 'bad.frame_idx'))
# cleanup
os.removedirs(test_dir)
del sf2
def test_save_load_reference(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
originallen = len(sf)
sf.save(f)
del sf
sf = SFrame(f)
# make a new column of "1s and save it back
int_data2 = sf['int_data'] + 1
int_data2.__materialize__()
sf['int_data2'] = int_data2
sf._save_reference(f)
del sf
sf = SFrame(f)
self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())
# try to append and save reference
expected = sf.to_dataframe()
sf = sf.append(sf)
sf._save_reference(f)
sf = SFrame(f)
self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())
self.assertEquals(2 * originallen, len(sf))
assert_frame_equal(sf[originallen:].to_dataframe(), expected)
assert_frame_equal(sf[:originallen].to_dataframe(), expected)
def test_save_to_csv(self):
f = tempfile.NamedTemporaryFile(suffix='.csv', delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='csv')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False)
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
import csv
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_MINIMAL)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_ALL)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_NONE)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
# Pandas compatibility options
sf.export_csv(f.name, sep=':', lineterminator='\r\n', doublequote=False, quotechar='\'', quote_level=csv.QUOTE_NONE)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, sep=':', lineterminator='\r\n', doublequote=False, quotechar='\'')
self.__test_equal(sf2, self.dataframe)
f.close()
os.unlink(f.name)
def test_save_to_json(self):
f = tempfile.NamedTemporaryFile(suffix='.json', delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='json')
sf2 = SFrame.read_json(f.name)
# the float column will be parsed as integer
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.export_json(f.name)
sf2 = SFrame.read_json(f.name)
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.export_json(f.name, orient='lines')
sf2 = SFrame.read_json(f.name, orient='lines')
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
f.close()
os.unlink(f.name)
def _remove_sframe_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
os.remove(f)
def test_creation_from_txt(self):
f = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
df = self.dataframe[['string_data']]
df.to_csv(f.name, index=False)
sf = SFrame(f.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
fgzip = tempfile.NamedTemporaryFile(suffix='.txt.gz', delete=False)
f_in = open(f.name, 'rb')
f_out = gzip.open(fgzip.name, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
sf = SFrame(fgzip.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
fgzip.close()
os.unlink(fgzip.name)
f.close()
os.unlink(f.name)
def test_creation_from_csv_on_local(self):
if os.path.exists('./foo.csv'):
os.remove('./foo.csv')
with open('./foo.csv', 'w') as f:
url = f.name
basesf = SFrame(self.dataframe)
basesf.save(url, format="csv")
f.close()
sf = SFrame('./foo.csv')
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
sf = SFrame(url)
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
os.remove(url)
def test_alternate_line_endings(self):
# test Windows line endings
if os.path.exists('./windows_lines.csv'):
os.remove('./windows_lines.csv')
windows_file_url = None
with open('./windows_lines.csv', 'w') as f:
windows_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(windows_file_url)
def test_skip_rows(self):
# test line skipping
if os.path.exists('./skip_lines.csv'):
os.remove('./skip_lines.csv')
skip_file_url = None
with open('./skip_lines.csv', 'w') as f:
f.write("trash\n");
f.write("junk\n");
skip_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(skip_file_url)
def test_creation_from_csv_on_http(self):
pass
# sf = SFrame(data=self.url, use_header=False)
# self.__test_equal(sf, pd.DataFrame({'1': self.a_to_z}))
def test_creation_from_csv_on_s3(self):
# Requires s3 account for jenkins
# sf = SFrame(data='s3://graphlab-testdata/foo.csv')
# print sf.head(sf.num_rows())
pass
def test_creation_from_csv_dir_local(self):
csv_dir = "./csv_dir"
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.mkdir(csv_dir)
for i in range(0, 100):
with open(os.path.join(csv_dir, 'foo.%d.csv' % i), 'w') as f:
url = f.name
self.dataframe.to_csv(url, index=False)
f.close()
singleton_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.0.csv"))
self.assertEquals(singleton_sf.num_rows(), 10)
many_sf = SFrame.read_csv(csv_dir)
self.assertEquals(many_sf.num_rows(), 1000)
glob_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.*2.csv"))
self.assertEquals(glob_sf.num_rows(), 100)
with self.assertRaises(RuntimeError):
SFrame.read_csv("missingdirectory")
with self.assertRaises(ValueError):
SFrame.read_csv("")
shutil.rmtree(csv_dir)
def test_creation_from_iterable(self):
# Normal dict of lists
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
df = pd.DataFrame(the_dict)
self.__test_equal(sf, df)
# Test that a missing value does not change the data type
the_dict['ints'][0] = None
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), int)
# numpy.nan is actually a float, so it should cast the column to float
the_dict['ints'][0] = np.nan
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), float)
# Just a single list
sf = SFrame(self.int_data)
df = pd.DataFrame(self.int_data)
df.columns = ['X1']
self.__test_equal(sf, df)
# Normal list of lists
list_of_lists = [[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]
sf = SFrame(list_of_lists)
cntr = 0
for i in sf:
self.assertEquals(list_of_lists[cntr], list(i['X1']))
cntr += 1
self.assertEquals(sf.num_columns(), 1)
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
sf2 = SFrame({'ints':sf['ints'],'floats':sf['floats'],'strings':sf['strings']})
df = pd.DataFrame(the_dict)
self.__test_equal(sf2, df)
sf2 = SFrame([sf['ints'],sf['floats'],sf['strings']])
self.assertEquals(['X1','X2','X3'],sf2.column_names())
sf2.rename({'X1':'ints','X2':'floats','X3':'strings'})
sf2=sf2[['floats','ints','strings']]
self.__test_equal(sf2, df)
sf = SFrame({'text': ('foo', 'bar', 'biz')})
df = pd.DataFrame({'text': ['foo', 'bar', 'biz']})
self.__test_equal(sf, df)
def test_head_tail(self):
sf = SFrame(data=self.dataframe)
assert_frame_equal(sf.head(4).to_dataframe(), self.dataframe.head(4))
# Cannot test for equality the same way because of dataframe indices
taildf = sf.tail(4)
for i in range(0, 4):
self.assertEqual(taildf['int_data'][i], self.dataframe['int_data'][i+6])
self.assertEqual(taildf['float_data'][i], self.dataframe['float_data'][i+6])
self.assertEqual(taildf['string_data'][i], self.dataframe['string_data'][i+6])
def test_head_tail_edge_case(self):
sf = SFrame()
self.assertEquals(sf.head().num_columns(), 0)
self.assertEquals(sf.tail().num_columns(), 0)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
sf = SFrame()
sf['a'] = []
self.assertEquals(sf.head().num_columns(), 1)
self.assertEquals(sf.tail().num_columns(), 1)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
def test_transform(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname], sf.column_types()[i])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'], float)
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
def test_transform_with_recursion(self):
sf = SFrame(data={'a':[0,1,2,3,4], 'b':['0','1','2','3','4']})
# this should be equivalent to sf.apply(lambda x: x) since column 'a'
# is equivalent to range(5)
sa = sf.apply(lambda x: sf[x['a']])
sb = sf.apply(lambda x: x)
self.__assert_sarray_equal(sa, sb)
def test_transform_with_type_inference(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'])
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
# an SFrame apply returning a list of numeric values should yield an array (vector) column, not a list column
sa = sf.apply(lambda x: [x['int_data'], x['float_data']])
self.assertEqual(sa.dtype(), array.array);
def test_transform_with_exception(self):
sf = SFrame(data=self.dataframe)
self.assertRaises(KeyError, lambda: sf.apply(lambda x: x['some random key'])) # cannot find the key
self.assertRaises(TypeError, lambda: sf.apply(lambda x: sum(x.values()))) # lambda cannot sum int and str
self.assertRaises(ZeroDivisionError, lambda: sf.apply(lambda x: x['int_data'] / 0)) # divide by 0 error
self.assertRaises(IndexError, lambda: sf.apply(lambda x: list(x.values())[10])) # index out of bound error
def test_empty_transform(self):
sf = SFrame()
b = sf.apply(lambda x:x)
self.assertEquals(len(b.head()), 0)
def test_flatmap(self):
# Correctness of typical usage
n = 10
sf = SFrame({'id': range(n)})
new_sf = sf.flat_map(["id_range"], lambda x: [[str(i)] for i in range(x['id'])])
self.assertEqual(new_sf.column_names(), ["id_range"])
self.assertEqual(new_sf.column_types(), [str])
expected_col = [str(x) for i in range(n) for x in range(i)]
self.assertListEqual(list(new_sf['id_range']), expected_col)
# Empty SFrame, without explicit column types
sf = SFrame()
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id_range'],
lambda x: [[i] for i in range(x['id'])])
# Empty rows successfully removed
sf = SFrame({'id': range(15)})
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 8 else [])
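# ids 0..8 map to empty lists and are dropped, leaving ids 9..14 (6 rows)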
self.assertEqual(new_sf.num_rows(), 6)
# if the first ten rows all map to empty output, an error is raised
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 9 else [])
def test_select_column(self):
sf = SFrame(data=self.dataframe)
sub_sf = sf.select_columns(['int_data', 'string_data'])
exp_df = pd.DataFrame({'int_data': self.int_data, 'string_data': self.string_data})
self.__test_equal(sub_sf, exp_df)
with self.assertRaises(ValueError):
sf.select_columns(['int_data', 'string_data', 'int_data'])
# test indexing
sub_col = sf['float_data']
self.assertEqual(list(sub_col.head(10)), self.float_data)
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(['duh',1])
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(0)
with self.assertRaises(RuntimeError):
sub_sf = sf.select_columns(['not_a_column'])
self.assertEqual(sf.select_columns([int]).column_names(), ['int_data'])
self.assertEqual(sf.select_columns([int, str]).column_names(), ['int_data', 'string_data'])
self.assertEqual(sf[int].column_names(), ['int_data'])
self.assertEqual(sf[[int, str]].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf[int, str].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf['int_data', 'string_data'].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf['string_data', 'int_data'].column_names(), ['string_data', 'int_data'])
sf = SFrame()
with self.assertRaises(RuntimeError):
sf.select_column('x')
with self.assertRaises(RuntimeError):
sf.select_columns(['x'])
sf.add_column(SArray(), 'x')
# does not throw
sf.select_column('x')
sf.select_columns(['x'])
with self.assertRaises(RuntimeError):
sf.select_column('y')
with self.assertRaises(RuntimeError):
sf.select_columns(['y'])
def test_topk(self):
sf = SFrame(data=self.dataframe)
# Test that order is preserved
df2 = sf.topk('int_data').to_dataframe()
df2_expected = self.dataframe.sort('int_data', ascending=False)
df2_expected.index = range(df2.shape[0])
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('float_data', 3).to_dataframe()
df2_expected = self.dataframe.sort('float_data', ascending=False).head(3)
df2_expected.index = range(3)
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('string_data', 3).to_dataframe()
for i in range(0, 3):
self.assertEqual(df2['int_data'][2-i], i + 7)
with self.assertRaises(TypeError):
sf.topk(2,3)
sf = SFrame()
sf.add_column(SArray([1,2,3,4,5]), 'a')
sf.add_column(SArray([1,2,3,4,5]), 'b')
sf.topk('a', 1) # should not fail
def test_filter(self):
sf = SFrame(data=self.dataframe)
filter_sa = SArray([1,1,1,0,0,0,0,1,1,1])
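# 0/1 mask: keeps the first three and the last three rows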
sf2 = sf[filter_sa]
exp_df = sf.head(3).append(sf.tail(3))
self.__test_equal(sf2, exp_df.to_dataframe())
# filter by an all-non-zero mask (keeps every row)
sf2 = sf[SArray(self.int_data)]
exp_df = sf.head(10).to_dataframe()
self.__test_equal(sf2, exp_df)
# filter by 0s
sf2 = sf[SArray([0,0,0,0,0,0,0,0,0,0])]
exp_df = sf.head(0).to_dataframe()
self.__test_equal(sf2, exp_df)
# wrong size
with self.assertRaises(IndexError):
sf2 = sf[SArray([0,1,205])]
# slightly bigger size
sf = SFrame()
n = 1000000
sf['a'] = range(n)
result = sf[sf['a'] == -1]
self.assertEquals(len(result), 0)
result = sf[sf['a'] > n - 123]
self.assertEquals(len(result), 122)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i + n - 122, l[i])
result = sf[sf['a'] < 2000]
self.assertEquals(len(result), 2000)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i, l[i])
def test_sample_split(self):
sf = SFrame(data=self.__create_test_df(100))
entry_list = set()
for i in sf:
entry_list.add(str(i))
sample_sf = sf.sample(.12, 9)
sample_sf2 = sf.sample(.12, 9)
self.assertEqual(len(sample_sf), len(sample_sf2))
assert_frame_equal(sample_sf.head().to_dataframe(), sample_sf2.head().to_dataframe())
for i in sample_sf:
self.assertTrue(str(i) in entry_list)
with self.assertRaises(ValueError):
sf.sample(3)
sample_sf = SFrame().sample(.12, 9)
self.assertEqual(len(sample_sf), 0)
a_split = sf.random_split(.12, 9)
first_split_entries = set()
for i in a_split[0]:
first_split_entries.add(str(i))
for i in a_split[1]:
self.assertTrue(str(i) in entry_list)
self.assertTrue(str(i) not in first_split_entries)
with self.assertRaises(ValueError):
sf.random_split(3)
self.assertEqual(len(SFrame().random_split(.4)[0]), 0)
self.assertEqual(len(SFrame().random_split(.4)[1]), 0)
# tests add_column, rename
def test_edit_column_ops(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
# Make sure auto names work
names = sf.column_names()
cntr = 1
for i in names:
self.assertEquals("X"+str(cntr), i)
cntr = cntr + 1
# Remove a column
del sf['X2']
# names
names = sf.column_names()
self.assertEquals(len(names), 2)
self.assertEquals('X1', names[0])
self.assertEquals('X3', names[1])
# check content
self.assertEquals(list(sf['X1'].head(10)), self.int_data)
self.assertEquals(list(sf['X3'].head(10)), self.string_data)
# check that a new automatically named column will not conflict
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
if len(uniq_set) == 1:
self.assertEquals(list(sf[i].head(10)), self.int_data)
else:
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), 3)
# replacing columns preserves order
names = sf.column_names()
for n in names:
sf[n] = sf[n].apply(lambda x: x)
self.assertEquals(sf.column_names(), names)
# do it again!
del sf['X1']
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), len(names))
# standard rename
rename_dict = {'X3':'data','X3.1':'more_data','X3.2':'even_more'}
sf.rename(rename_dict)
self.assertEquals(sf.column_names(), ['data','more_data','even_more'])
# rename a column to a name that's already taken
with self.assertRaises(RuntimeError):
sf.rename({'data':'more_data'})
# try to rename a column that doesn't exist
with self.assertRaises(ValueError):
sf.rename({'foo':'bar'})
# pass something other than a dict
with self.assertRaises(TypeError):
sf.rename('foo')
# Setting a column to const preserves order
names = sf.column_names()
for n in names:
sf[n] = 1
self.assertEquals(sf.column_names(), names)
def test_duplicate_add_column_failure(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data), "hello")
with self.assertRaises(RuntimeError):
sf.add_column(SArray(self.float_data), "hello")
def test_remove_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
sf2 = sf.remove_column('X3')
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X4', 'X5'])
sf2 = sf.remove_columns(['X2', 'X5'])
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X4'])
# with a generator expression
sf2 = sf.remove_columns((n for n in ['X1', 'X5'] if n in sf.column_names()))
assert sf is sf2
self.assertEquals(sf.column_names(), ['X4'])
def test_remove_bad_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_column('bad'))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_columns(['X1', 'X2', 'X3', 'bad', 'X4']))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
def __generate_synthetic_sframe__(self, num_users):
"""
synthetic collaborative data.
generate num_users users; user i watched movies 1, ..., i.
rating(i, j) = i + j
length(i, j) = i - j
"""
sf = SFrame()
sparse_matrix = {}
for i in range(1, num_users + 1):
sparse_matrix[i] = [(j, i + j, i - j) for j in range(1, i + 1)]
user_ids = []
movie_ids = []
ratings = []
length_of_watching = []
for u in sparse_matrix:
user_ids += [u] * len(sparse_matrix[u])
movie_ids += [x[0] for x in sparse_matrix[u]]
ratings += [x[1] for x in sparse_matrix[u]]
length_of_watching += [x[2] for x in sparse_matrix[u]]
# typical add column stuff
sf['user_id'] = (SArray(user_ids, int))
sf['movie_id'] = (SArray(movie_ids, str))
sf['rating'] = (SArray(ratings, float))
sf['length'] = (SArray(length_of_watching, int))
return sf
def test_aggregate_ops(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf.__materialize__()
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.SUM('vector_values'),
aggregate.MEAN('vector_values'),
aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value'),
aggregate.FREQ_COUNT('value')]
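# groupby auto-names the output columns ('Count', 'Sum of value',
# 'Vector Sum of vector_values', 'Distinct of value',
# 'Frequency Count of value', ...), as checked below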
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['Count'][0], m)
self.assertEqual(sf2['Sum of value'][0], sum(values))
self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))
self.assertEqual(sf2['Min of value'][0], min(values))
self.assertEqual(sf2['Max of value'][0], max(values))
self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))
self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))
np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),
list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),
list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['Count Distinct of value'][0],
len(np.unique(values)))
self.assertEqual(sorted(sf2['Distinct of value'][0]),
sorted(list(np.unique(values))))
self.assertEqual(sf2['Frequency Count of value'][0],
{k:1 for k in np.unique(values)})
# For vectors
def test_min_max_with_missing_values(self):
"""
Test builtin groupby aggregators
"""
sf = SFrame()
sf['key'] = [1,1,1,1,1,1,2,2,2,2]
sf['value'] = [1,None,None,None,None,None, None,None,None,None]
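# group 1 has six rows with a single non-missing value; group 2 has four rows, all missing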
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value'), aggregate.FREQ_COUNT('value')]
sf2 = sf.groupby('key', built_ins).sort('key')
self.assertEqual(list(sf2['Count']), [6,4])
self.assertEqual(list(sf2['Sum of value']), [1, 0])
self.assertEqual(list(sf2['Avg of value']), [1, None])
self.assertEqual(list(sf2['Min of value']), [1, None])
self.assertEqual(list(sf2['Max of value']), [1, None])
self.assertEqual(list(sf2['Var of value']), [0, 0])
self.assertEqual(list(sf2['Stdv of value']), [0, 0])
self.assertEqual(list(sf2['Count Distinct of value']), [2, 1])
self.assertEqual(set(sf2['Distinct of value'][0]), set([1, None]))
self.assertEqual(set(sf2['Distinct of value'][1]), set([None]))
self.assertEqual(sf2['Frequency Count of value'][0], {1:1, None:5})
self.assertEqual(sf2['Frequency Count of value'][1], {None:4})
def test_aggregate_ops_on_lazy_frame(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf['value'] = sf['value'] + 0
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.SUM('vector_values'),
aggregate.MEAN('vector_values'),
aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value')]
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['Count'][0], m)
self.assertEqual(sf2['Sum of value'][0], sum(values))
self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))
self.assertEqual(sf2['Min of value'][0], min(values))
self.assertEqual(sf2['Max of value'][0], max(values))
self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))
self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))
np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),
list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),
list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['Count Distinct of value'][0],
len(np.unique(values)))
self.assertEqual(sorted(sf2['Distinct of value'][0]),
sorted(np.unique(values)))
def test_aggregate_ops2(self):
"""
Test builtin groupby aggregators using explicit named columns
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
built_ins = {'count':aggregate.COUNT,
'sum':aggregate.SUM('value'),
'avg':aggregate.AVG('value'),
'avg2':aggregate.MEAN('value'),
'min':aggregate.MIN('value'),
'max':aggregate.MAX('value'),
'var':aggregate.VAR('value'),
'var2':aggregate.VARIANCE('value'),
'stdv':aggregate.STD('value'),
'stdv2':aggregate.STDV('value'),
'vector_sum': aggregate.SUM('vector_values'),
'vector_mean': aggregate.MEAN('vector_values'),
'count_unique':aggregate.COUNT_DISTINCT('value'),
'unique':aggregate.DISTINCT('value'),
'frequency':aggregate.FREQ_COUNT('value')}
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['count'][0], m)
self.assertEqual(sf2['sum'][0], sum(values))
self.assertAlmostEqual(sf2['avg'][0], np.mean(values))
self.assertAlmostEqual(sf2['avg2'][0], np.mean(values))
self.assertEqual(sf2['min'][0], min(values))
self.assertEqual(sf2['max'][0], max(values))
self.assertAlmostEqual(sf2['var'][0], np.var(values))
self.assertAlmostEqual(sf2['var2'][0], np.var(values))
self.assertAlmostEqual(sf2['stdv'][0], np.std(values))
self.assertAlmostEqual(sf2['stdv2'][0], np.std(values))
np.testing.assert_almost_equal(sf2['vector_sum'][0], list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(sf2['vector_mean'][0], list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['count_unique'][0], len(np.unique(values)))
self.assertEqual(sorted(sf2['unique'][0]),
sorted(np.unique(values)))
self.assertEqual(sf2['frequency'][0],
{k:1 for k in np.unique(values)})
def test_groupby(self):
"""
Test builtin groupby and aggregate on different column types
"""
num_users = 500
sf = self.__generate_synthetic_sframe__(num_users=num_users)
built_ins = [aggregate.COUNT(), aggregate.SUM('rating'),
aggregate.AVG('rating'), aggregate.MIN('rating'),
aggregate.MAX('rating'), aggregate.VAR('rating'),
aggregate.STDV('rating')]
built_in_names = ['Sum', 'Avg', 'Min', 'Max', 'Var', 'Stdv']
"""
Test groupby user_id and aggregate on rating
"""
sf_user_rating = sf.groupby('user_id', built_ins)
actual = sf_user_rating.column_names()
expected = ['%s of rating' % v for v in built_in_names] \
+ ['user_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_user_rating:
uid = row['user_id']
mids = range(1, uid + 1)
ratings = [uid + i for i in mids]
expected = [len(ratings), sum(ratings), np.mean(ratings),
min(ratings), max(ratings), np.var(ratings),
np.sqrt(np.var(ratings))]
actual = [row['Count']] + [row['%s of rating' % op] \
for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
"""
Test that COUNT can be applied without specifying an aggregate column.
"""
sf_user_rating = sf.groupby("user_id", {'counter': aggregate.COUNT()})
actual = {x['user_id']: x['counter'] for x in sf_user_rating}
expected = {i: i for i in range(1, num_users + 1)}
self.assertDictEqual(actual, expected)
"""
Test groupby movie_id and aggregate on length_of_watching
"""
built_ins = [aggregate.COUNT(), aggregate.SUM('length'),
aggregate.AVG('length'), aggregate.MIN('length'),
aggregate.MAX('length'), aggregate.VAR('length'),
aggregate.STDV('length')]
sf_movie_length = sf.groupby('movie_id', built_ins)
actual = sf_movie_length.column_names()
expected = ['%s of length' % v for v in built_in_names] \
+ ['movie_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_movie_length:
mid = row['movie_id']
uids = range(int(mid), num_users + 1)
values = [i - int(mid) for i in uids]
expected = [len(values), sum(values), np.mean(values), min(values),
max(values), np.var(values), np.std(values)]
actual = [row['Count']] + [row['%s of length' % op] \
for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
def test_quantile_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
# max and min rating for each user
g = sf.groupby('user_id', [aggregate.MIN('rating'),
aggregate.MAX('rating'),
aggregate.QUANTILE('rating', 0, 1)])
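# QUANTILE('rating', 0, 1) returns the 0th and 1st quantiles (min and max) as a 2-element vector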
self.assertEquals(len(g), 500)
for row in g:
minrating = row['Min of rating']
maxrating = row['Max of rating']
arr = list(row['Quantiles of rating'])
self.assertEquals(len(arr), 2)
self.assertEquals(arr[0], minrating)
self.assertEquals(arr[1], maxrating)
def test_argmax_argmin_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_ret = sf.groupby('user_id',
{'movie with max rating' : aggregate.ARGMAX('rating','movie_id'),
'movie with min rating' : aggregate.ARGMIN('rating','movie_id')})
self.assertEquals(len(sf_ret), 500)
self.assertEqual(sf_ret["movie with max rating"].dtype(), str)
self.assertEqual(sf_ret["movie with min rating"].dtype(), str)
self.assertEqual(sf_ret["user_id"].dtype(), int)
# make sure we have computed correctly.
max_d = {}
min_d = {}
for i in sf:
key = i['user_id']
if key not in max_d:
max_d[key] = (i['movie_id'],i['rating'])
min_d[key] = (i['movie_id'],i['rating'])
else:
if max_d[key][1] < i['rating']:
max_d[key] = (i['movie_id'],i['rating'])
if min_d[key][1] > i['rating']:
min_d[key] = (i['movie_id'],i['rating'])
for i in sf_ret:
key = i['user_id']
self.assertEqual(i["movie with max rating"],max_d[key][0])
self.assertEqual(i["movie with min rating"],min_d[key][0])
def test_multicolumn_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_um = sf.groupby(["user_id", "movie_id"], aggregate.COUNT)
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
sf_um = sf.groupby(["movie_id", "user_id"], aggregate.COUNT())
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
def __assert_concat_result_equal(self, result, expected, list_columns):
self.assertEqual(result.num_columns(), expected.num_columns())
for column in result.column_names():
c1 = result[column]
c2 = expected[column]
self.assertEqual(c1.dtype(), c2.dtype())
self.assertEqual(c1.size(), c2.size())
if (column in list_columns):
for i in range(len(c1)):
if (c1[i] == None):
self.assertTrue(c2[i] == None)
continue
if (c1.dtype() == dict):
for k in c1[i]:
self.assertEqual(c2[i][k], c1[i][k])
else:
s1 = list(c1[i]);
if s1 != None: s1.sort()
s2 = list(c2[i]);
if s2 != None: s2.sort()
self.assertEqual(s1, s2)
else:
self.assertEqual(list(c1),list(c2))
def test_groupby_dict_key(self):
t = SFrame({'a':[{1:2},{3:4}]})
with self.assertRaises(TypeError):
t.groupby('a', {})
def test_concat(self):
sf = SFrame()
sf['a'] = [1,1,1,1, 2,2,2, 3, 4,4, 5]
sf['b'] = [1,2,1,2, 3,3,1, 4, None, 2, None]
sf['c'] = ['a','b','a','b', 'e','e', None, 'h', 'i','j', 'k']
sf['d'] = [1.0,2.0,1.0,2.0, 3.0,3.0,1.0, 4.0, None, 2.0, None]
sf['e'] = [{'x': 1}] * len(sf['a'])
print(sf['b'].dtype())
result = sf.groupby('a', aggregate.CONCAT('b'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of b': [[1.,1.,2.,2.],[1.,3.,3.],[4.],[2.], []]
})
expected_result['List of b'] = expected_result['List of b'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of b'])
result = sf.groupby('a', aggregate.CONCAT('d'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of d': [[1,1,2,2],[1,3,3],[4],[2], []]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of d'])
result = sf.groupby('a', {'c_c' :aggregate.CONCAT('c')})
expected_result = SFrame({
'a': [1,2,3,4, 5],
'c_c': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_c'])
result = sf.groupby('a', aggregate.CONCAT('b','c'))
expected_result = SFrame({
'a': [1,2,3,4,5],
'Dict of b_c': [{1:'a',2:'b'},{3:'e', 1: None},{4:'h'},{2:'j'}, {}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['Dict of b_c'])
result = sf.groupby('a', {'c_b':aggregate.CONCAT('c','b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'c_b': [{'a':1, 'b':2},{'e':3},{'h':4},{'i':None, 'j':2},{'k':None}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_b'])
result = sf.groupby('a', {'cs':aggregate.CONCAT('c'), 'bs':aggregate.CONCAT('b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'bs': [[1,1,2,2],[1,3,3],[4],[2], []],
'cs': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
expected_result['bs'] = expected_result['bs'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['bs','cs'])
# error cases: no column given, nonexistent column, and invalid value column type
with self.assertRaises(TypeError):
sf.groupby('a', aggregate.CONCAT())
with self.assertRaises(KeyError):
sf.groupby('a', aggregate.CONCAT('nonexist'))
with self.assertRaises(TypeError):
sf.groupby('a', aggregate.CONCAT('e', 'a'))
def test_select_one(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
res = list(sf.groupby('a', {'b':aggregate.SELECT_ONE('b')}))
self.assertEqual(len(res), 5)
for i in res:
self.assertTrue(i['b'] == 2 * i['a'] or i['b'] == 2 * i['a'] - 1)
def test_unique(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
self.assertEqual(len(sf.unique()), 10)
vals = [1,1,2,2,3,3,4,4, None, None]
sf = SFrame({'a':vals,'b':vals})
res = sf.unique()
self.assertEqual(len(res), 5)
self.assertEqual(set(res['a']), set([1,2,3,4,None]))
self.assertEqual(set(res['b']), set([1,2,3,4,None]))
def test_append_empty(self):
sf_with_data = SFrame(data=self.dataframe)
empty_sf = SFrame()
self.assertFalse(sf_with_data.append(empty_sf) is sf_with_data)
self.assertFalse(empty_sf.append(sf_with_data) is sf_with_data)
self.assertFalse(empty_sf.append(empty_sf) is empty_sf)
def test_append_all_match(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_lazy(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
self.assertTrue(new_sf.__is_materialized__())
filter_sf1 = SArray([1 for i in range(sf1.num_rows())] + [0 for i in range(sf2.num_rows())])
filter_sf2 = SArray([0 for i in range(sf1.num_rows())] + [1 for i in range(sf2.num_rows())])
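# masks that select exactly the rows contributed by sf1 and by sf2, respectively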
new_sf1 = new_sf[filter_sf1]
new_sf2 = new_sf[filter_sf2]
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
assert_frame_equal(sf1.to_dataframe(), new_sf1.to_dataframe())
assert_frame_equal(sf2.to_dataframe(), new_sf2.to_dataframe())
row = sf1.head(1)
sf = SFrame()
for i in range(10):
sf = sf.append(row)
df = sf.to_dataframe()
for i in range(10):
self.assertEqual(list(df.iloc[[i]]), list(sf.head(1).to_dataframe().iloc[[0]]))
def test_recursive_append(self):
sf = SFrame()
for i in range(200):
sf = sf.append(SFrame(data = self.dataframe))
#consume
sf.__materialize__()
def test_print_sframe(self):
sf = SFrame()
def _test_print():
sf.__repr__()
sf._repr_html_()
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
output = StringIO()
sf.print_rows(output_file=output)
n = 20
sf['int'] = [i for i in range(n)]
sf['float'] = [float(i) for i in range(n)]
sf['str'] = [str(i) for i in range(n)]
uc = '\xe5\xa4\xa7\xe5\xa4\xb4' # dato pronounced in chinese, big head
sf['unicode'] = [uc for i in range(n)]
sf['array'] = [array.array('d', [i]) for i in range(n)]
sf['list'] = [[i, float(i), [i]] for i in range(n)]
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
sf['dt'] = [utc for i in range(n)]
sf['img'] = [Image() for i in range(n)]
sf['long_str'] = ["".join([str(i)] * 50) for i in range(n)]
sf['long_unicode'] = ["".join([uc] * 50) for i in range(n)]
sf['bad_unicode'] = ['\x9d' + uc for i in range(n)]
_test_print()
def test_print_lazy_sframe(self):
sf1 = SFrame(data=self.dataframe)
self.assertTrue(sf1.__is_materialized__())
sf2 = sf1[sf1['int_data'] > 3]
sf2.__repr__()
sf2.__str__()
self.assertFalse(sf2.__is_materialized__())
len(sf2)
self.assertTrue(sf2.__is_materialized__())
def test_append_order_diff(self):
# name match but column order not match
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
sf2.swap_columns('int_data', 'string_data')
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_empty_sframe(self):
sf = SFrame(data=self.dataframe)
other = SFrame()
# non empty append empty
assert_frame_equal(sf.append(other).to_dataframe(), self.dataframe)
# empty append non empty
assert_frame_equal(other.append(sf).to_dataframe(), self.dataframe)
#empty append empty
assert_frame_equal(other.append(other).to_dataframe(), pd.DataFrame())
def test_append_exception(self):
sf = SFrame(data=self.dataframe)
# column number not match
other = SFrame()
other.add_column(SArray(), "test")
self.assertRaises(RuntimeError, lambda: sf.append(other)) # column not the same
# column name not match
other = SFrame()
names = sf.column_names()
for name in sf.column_names():
other.add_column(SArray(), name)
names[0] = 'some name not match'
self.assertRaises(RuntimeError, lambda: sf.append(other))
# name match but column type order not match
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
# change one column's type so the names match but the types do not
sf1["int_data"] = sf2.select_column("int_data").astype(float)
self.assertRaises(RuntimeError, lambda: sf1.append(sf2))
def test_simple_joins(self):
inner_expected = SFrame()
inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name')
inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id')
inner_expected.add_column(SArray(['Clerical','Engineering','Clerical','Engineering','Sales']), 'dep_name')
# Tests the "natural join" case
beg = time.time()
res = self.employees_sf.join(self.departments_sf)
end = time.time()
print("Really small join: " + str(end-beg) + " s")
self.__assert_join_results_equal(res, inner_expected)
left_join_row = SFrame()
left_join_row.add_column(SArray(['John']), 'last_name')
left_join_row.add_column(SArray([None], int), 'dep_id')
left_join_row.add_column(SArray([None], str), 'dep_name')
left_expected = inner_expected.append(left_join_row)
# Left outer join, passing string to 'on'
res = self.employees_sf.join(self.departments_sf, how='left', on='dep_id')
self.__assert_join_results_equal(res, left_expected)
right_join_row = SFrame()
right_join_row.add_column(SArray([None], str), 'last_name')
right_join_row.add_column(SArray([35]), 'dep_id')
right_join_row.add_column(SArray(['Marketing']), 'dep_name')
right_expected = inner_expected.append(right_join_row)
# Right outer join, passing list to 'on'
res = self.employees_sf.join(self.departments_sf, how='right', on=['dep_id'])
self.__assert_join_results_equal(res, right_expected)
outer_expected = left_expected.append(right_join_row)
# Full outer join, passing dict to 'on'
res = self.employees_sf.join(self.departments_sf, how='outer', on={'dep_id':'dep_id'})
self.__assert_join_results_equal(res, outer_expected)
# Test a join on non-matching key
res = self.employees_sf.join(self.departments_sf, on={'last_name':'dep_name'})
self.assertEquals(res.num_rows(), 0)
self.assertEquals(res.num_cols(), 3)
self.assertEquals(res.column_names(), ['last_name', 'dep_id', 'dep_id.1'])
# Test a join on a non-unique key
bad_departments = SFrame()
bad_departments['dep_id'] = SArray([33,33,31,31])
bad_departments['dep_name'] = self.departments_sf['dep_name']
no_pk_expected = SFrame()
no_pk_expected['last_name'] = SArray(['Rafferty','Rafferty','Heisenberg','Jones','Heisenberg','Jones'])
no_pk_expected['dep_id'] = SArray([31,31,33,33,33,33])
no_pk_expected['dep_name'] = SArray(['Clerical','Marketing','Sales','Sales','Engineering','Engineering'])
res = self.employees_sf.join(bad_departments, on='dep_id')
self.__assert_join_results_equal(res, no_pk_expected)
# Left join on non-unique key
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected[['dep_id', 'dep_name', 'last_name']]
res = bad_departments.join(self.employees_sf, on='dep_id', how='left')
self.__assert_join_results_equal(res, no_pk_expected)
def test_big_composite_join(self):
# Create a semi large SFrame with composite primary key (letter, number)
letter_keys = []
number_keys = []
data = []
for i in string.ascii_lowercase:
for j in range(0,100):
letter_keys.append(i)
number_keys.append(j)
which = j % 3
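# the payload cycles through uppercase letters, digits, and hexdigits based on j % 3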
if which == 0:
data.append(string.ascii_uppercase)
elif which == 1:
data.append(string.digits)
elif which == 2:
data.append(string.hexdigits)
pk_gibberish = SFrame()
pk_gibberish['letter'] = SArray(letter_keys, str)
pk_gibberish['number'] = SArray(number_keys, int)
pk_gibberish['data'] = SArray(data, str)
# Some rows that won't match
more_data = []
more_letter_keys = []
more_number_keys = []
for i in range(0,40000):
more_data.append('fish')
more_letter_keys.append('A')
more_number_keys.append(200)
for i in range(0,80):
for j in range(100,1000):
more_data.append('waffles')
more_letter_keys.append(letter_keys[j])
more_number_keys.append(number_keys[j])
# Non-matching row in this stretch
if j == 147:
more_letter_keys[-1] = 'A'
for i in range(0,5000):
more_data.append('pizza')
more_letter_keys.append('Z')
more_number_keys.append(400)
join_with_gibberish = SFrame()
join_with_gibberish['data'] = SArray(more_data, str)
join_with_gibberish['moredata'] = SArray(more_data, str)
join_with_gibberish['a_number'] = SArray(more_number_keys, int)
join_with_gibberish['a_letter'] = SArray(more_letter_keys, str)
expected_answer = SFrame()
exp_letter = []
exp_number = []
exp_data = []
for i in range(0,80):
exp_letter.extend(letter_keys[100:147])
exp_number.extend(number_keys[100:147])
exp_letter.extend(letter_keys[148:1000])
exp_number.extend(number_keys[148:1000])
exp_data.extend(data[100:147])
exp_data.extend(data[148:1000])
expected_answer['letter'] = SArray(exp_letter, str)
expected_answer['number'] = SArray(exp_number, int)
expected_answer['data'] = SArray(exp_data, str)
expected_answer['data.1'] = 'waffles'
expected_answer['moredata'] = 'waffles'
beg = time.time()
res = pk_gibberish.join(join_with_gibberish, on={'letter':'a_letter','number':'a_number'})
end = time.time()
print("Join took " + str(end-beg) + " seconds")
self.__assert_join_results_equal(res, expected_answer)
def test_convert_dataframe_empty(self):
sf = SFrame()
sf['a'] = SArray([], int)
df = sf.to_dataframe()
self.assertEqual(df['a'].dtype, int)
sf1 = SFrame(df)
self.assertEquals(sf1['a'].dtype(), int)
self.assertEqual(sf1.num_rows(), 0)
def test_replace_one_column(self):
sf = SFrame()
sf['a'] = [1,2,3]
self.assertEquals(list(sf['a']), [1,2,3])
# this should succeed: 'a' is the only column, so it may be replaced with data of a different length
sf['a'] = [1,2]
self.assertEquals(list(sf['a']), [1,2])
# a failed replacement should leave the original SFrame unchanged
with self.assertRaises(TypeError):
sf['a'] = [1,2,'a']
self.assertEquals(list(sf['a']), [1,2])
# replacing a column with one of a different length should fail if there is more than one column
sf = SFrame()
sf['a'] = [1,2,3]
sf['b'] = ['a', 'b', 'c']
with self.assertRaises(RuntimeError):
sf['a'] = [1,2]
def test_filter_by(self):
# Set up SFrame to filter by
sf = SFrame()
sf.add_column(SArray(self.int_data), "ints")
sf.add_column(SArray(self.float_data), "floats")
sf.add_column(SArray(self.string_data), "strings")
# Normal cases
res = sf.filter_by(SArray(self.int_data), "ints")
self.__assert_join_results_equal(res, sf)
res = sf.filter_by(SArray(self.int_data), "ints", exclude=True)
self.assertEquals(list(res), [])
res = sf.filter_by([5,6], "ints")
exp = SFrame()
exp.add_column(SArray(self.int_data[4:6]), "ints")
exp.add_column(SArray(self.float_data[4:6]), "floats")
exp.add_column(SArray(self.string_data[4:6]), "strings")
self.__assert_join_results_equal(res, exp)
exp_opposite = SFrame()
exp_opposite.add_column(SArray(self.int_data[:4]+self.int_data[6:]), "ints")
exp_opposite.add_column(SArray(self.float_data[:4]+self.float_data[6:]), "floats")
exp_opposite.add_column(SArray(self.string_data[:4]+self.string_data[6:]), "strings")
res = sf.filter_by([5,6], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
exp_one = SFrame()
exp_one.add_column(SArray(self.int_data[4:5]), "ints")
exp_one.add_column(SArray(self.float_data[4:5]), "floats")
exp_one.add_column(SArray(self.string_data[4:5]), "strings")
exp_all_but_one = SFrame()
exp_all_but_one.add_column(SArray(self.int_data[:4]+self.int_data[5:]), "ints")
exp_all_but_one.add_column(SArray(self.float_data[:4]+self.float_data[5:]), "floats")
exp_all_but_one.add_column(SArray(self.string_data[:4]+self.string_data[5:]), "strings")
res = sf.filter_by(5, "ints")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
res = sf.filter_by("5", "strings")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
# Only values not present in the column
res = sf.filter_by([77,77,88,88], "ints")
# Test against empty SFrame with correct columns/types
self.__assert_join_results_equal(res, exp_one[exp_one['ints'] == 9000])
res = sf.filter_by([77,77,88,88], "ints", exclude=True)
self.__assert_join_results_equal(res, sf)
# Duplicate values
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Duplicate and missing
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Type mismatch
with self.assertRaises(TypeError):
res = sf.filter_by(["hi"], "ints")
# Column doesn't exist
with self.assertRaises(KeyError):
res = sf.filter_by([1,2], "intssss")
# Something that can't be turned into an SArray
with self.assertRaises(Exception):
res = sf.filter_by({1:2,3:4}, "ints")
# column_name not given as string
with self.assertRaises(TypeError):
res = sf.filter_by(1,2)
# Duplicate column names after join. Should be last because of the
# renames.
sf.rename({'ints':'id','floats':'id1','strings':'id11'})
exp.rename({'ints':'id','floats':'id1','strings':'id11'})
exp_opposite.rename({'ints':'id','floats':'id1','strings':'id11'})
res = sf.filter_by([5,6], "id")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([5,6], "id", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# XXXXXX: should be inner function
def __test_to_from_dataframe(self, data, type):
sf = SFrame()
sf['a'] = data
df = sf.to_dataframe()
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
df = pd.DataFrame({'val': data})
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
def test_to_from_dataframe(self):
self.__test_to_from_dataframe([1,2,3], int)
self.__test_to_from_dataframe(['a', 'b', 'c'], str)
self.__test_to_from_dataframe([1.0, 2.0, 3.0], float)
self.__test_to_from_dataframe([[1, 'b', {'a': 1}], [1,2,3]], list)
self.__test_to_from_dataframe([{'a':1, 1:None}, {'b':2}], dict)
self.__test_to_from_dataframe([[1,2],[1,2],[]], array.array)
def test_pack_columns_exception(self):
sf = SFrame()
sf['a'] = [1, 2, 3, None, None]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
# cannot pack non-numeric values into an array
with self.assertRaises(TypeError):
sf.pack_columns(dtype=array.array)
# cannot give a non-numeric na value for an array
with self.assertRaises(ValueError):
sf.pack_columns(dtype=array.array, fill_na='c')
# cannot pack columns that do not exist
with self.assertRaises(ValueError):
sf.pack_columns(['d','a'])
# dtype has to be dict/array/list
with self.assertRaises(ValueError):
sf.pack_columns(dtype=str)
# pack duplicate columns
with self.assertRaises(ValueError):
sf.pack_columns(['a','a'])
# packing a subset of columns into an array should fail for columns that are not numeric
with self.assertRaises(TypeError):
sf.pack_columns(['a','b'], dtype=array.array)
with self.assertRaises(TypeError):
sf.pack_columns(column_prefix = 1)
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = '1')
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = 'c', columns=['a', 'b'])
def test_pack_columns2(self):
sf = SFrame()
sf['id'] = [1, 2, 3, 4]
sf['category.a'] = [None, '2', '3', None]
sf['category.b'] = [None, 2.0, None, 4.0]
expected = SArray([
[None, None],
['2', 2.0],
['3', None],
[None, 4.0]])
result = sf.pack_columns(column_prefix='category')
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['category'], expected)
result = sf.pack_columns(column_prefix='category', new_column_name="new name")
self.assertEqual(result.column_names(), ['id', 'new name'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['new name'], expected)
# default dtype is list
result = sf.pack_columns(column_prefix='category', dtype=list)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == True by default
expected = SArray([
{},
{'a':'2', 'b':2.0},
{'a':'3'},
{'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict)
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == False
expected = SArray([
{},
{'category.a':'2', 'category.b':2.0},
{'category.a':'3'},
{'category.b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, remove_prefix=False)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# fill_na
expected = SArray([
{'a':1, 'b':1},
{'a':'2', 'b':2.0},
{'a':'3', 'b':1},
{'a':1, 'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, fill_na = 1)
self.__assert_sarray_equal(result['category'], expected)
expected = SArray([
[1],
[2],
[3],
[4]], list)
result = sf.pack_columns(['id'], new_column_name='id')
self.assertEqual(sorted(result.column_names()), sorted(['id', 'category.a', 'category.b']))
self.__assert_sarray_equal(result['id'], expected)
def test_pack_columns(self):
sf = SFrame()
sf['id'] = [1, 2, 3, 4, 5]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
expected_all_default = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
# pack all columns, all default values
self.__assert_sarray_equal(sf.pack_columns()['X1'], expected_all_default)
expected_ab_default = SArray([
[1, None],
[2, '2'],
[3, '3'],
[4, None],
[5, '5']
])
expected_all_fillna_1 = SArray([
[1, -1, -1],
[2, '2', 2.0],
[3, '3', 3.0],
[4, -1, -1],
[5, '5', 5.0]
])
# pack all columns, do not drop na, and fill missing values with some value
result = sf.pack_columns(fill_na=-1)
self.assertEqual(result.column_names(), ['X1'])
self.__assert_sarray_equal(result['X1'], expected_all_fillna_1)
# pack partial columns, all default value
result = sf.pack_columns(['id','b'])
self.assertEqual(result.column_names(), ['c','X2'])
self.__assert_sarray_equal(result['c'], sf['c'])
self.__assert_sarray_equal(result['X2'], expected_ab_default)
expected_sarray_ac_fillna_default = SArray([
[1, float('NaN')],
[2, 2.0],
[3, 3.0],
[4, float('NaN')],
[5, 5.0]
])
result = sf.pack_columns(['id','c'], dtype=array.array)
self.assertEqual(result.column_names(), ['b', 'X2'])
self.__assert_sarray_equal(result['b'], sf['b'])
self.__assert_sarray_equal(result['X2'], expected_sarray_ac_fillna_default)
expected_dict_default = SArray([
{'id': 1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id':4 },
{'id':5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict)
self.__assert_sarray_equal(result['X1'], expected_dict_default)
expected_dict_fillna = SArray([
{'id': 1, 'b':-1, 'c': -1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id': 4, 'b':-1, 'c': -1},
{'id': 5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict, fill_na=-1)
self.__assert_sarray_equal(result['X1'], expected_dict_fillna)
# pack large number of rows
sf = SFrame()
num_rows = 100000
sf['a'] = range(0, num_rows);
sf['b'] = range(0, num_rows);
result = sf.pack_columns(['a', 'b']);
self.assertEqual(len(result), num_rows);
def test_pack_columns_dtype(self):
a = SFrame({'name':[-140500967,-1405039672],'data':[3,4]})
b = a.pack_columns(['name','data'],dtype=array.array)
expected = SArray([[-140500967, 3],[-1405039672,4]])
self.__assert_sarray_equal(b['X1'], expected)
def test_unpack_dict_mixtype(self):
sf = SFrame({'a':[{'a':["haha", "hoho"]}, {'a':array.array('d', [1,2,3])}]})
sf = sf.unpack('a', column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), list)
sf = SFrame({'a':[{'a':["haha", "hoho"]}, {'a':None}]})
sf = sf.unpack('a', column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), list)
sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':None}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), array.array)
sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':{'b':1}}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), str)
sa = SArray([{'a': 1, 'b': 0.1}, {'a': 0.1, 'b': 1}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), float)
self.assertEqual(sf['b'].dtype(), float)
def test_unpack_list(self):
sa = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
expected = SFrame()
expected ['a'] = [1, 2, 3, 4, 5]
expected ['b'] = [None, '2', '3', None, '5']
expected ['c'] = [None, 2.0, 3.0, None, 5.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(column_name_prefix='ttt');
self.assertEqual(result.column_names(), ['ttt.0', 'ttt.1', 'ttt.2'])
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = [1, 2, None, 4, 5]
e['b'] = [None, '2', '3', None, '5']
e['c'] = [None, 2.0, None, None, 5.0]
result = sa.unpack(na_value=3);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# wrong length
with self.assertRaises(TypeError):
sa.unpack(column_name_prefix=['a','b'])
# wrong type
with self.assertRaises(RuntimeError):
sa.unpack(column_types = [str, int, float])
# wrong limit types
with self.assertRaises(TypeError):
sa.unpack(limit=["1"])
# int array cannot be unpacked
with self.assertRaises(TypeError):
SArray([1,2,3,4]).unpack()
# column name must be a string
with self.assertRaises(TypeError):
sa.unpack(1)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = int)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = [np.array])
# cannot infer type if no values
with self.assertRaises(RuntimeError):
SArray([], list).unpack()
def test_unpack_array(self):
import array
sa = SArray([
array.array('d', [1, 1, 0]),
array.array('d', [2, -1, 1]),
array.array('d', [3, 3, 2]),
array.array('d', [-1, 2, 3]),
array.array('d', [5, 5, 4])
])
expected = SFrame()
expected ['a'] = [1.0, 2.0, 3.0, -1.0, 5.0]
expected ['b'] = [1.0, -1.0, 3.0, 2.0, 5.0]
expected ['c'] = [0.0, 1.0, 2.0, 3.0, 4.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# right amount column names
result = sa.unpack(column_name_prefix = 'unpacked');
result.rename(dict(zip(result.column_names(), ['t.0', 't.1', 't.2'])))
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
expected['a'] = expected['a'].astype(int)
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype(float)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = SArray([1, 2, 3, None, 5], float)
e['b'] = SArray([1, None, 3, 2, 5], float)
e['c'] = SArray([0, 1, 2, 3, 4], float)
result = sa.unpack(na_value=-1);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
def test_unpack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5,6,7]
sf["is_restaurant"] = [1, 1,0,0, 1, None, None]
sf["is_retail"] = [None,1,1,None,1, None, None]
sf["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
packed_sf = SFrame()
packed_sf['user_id'] = sf['user_id']
packed_sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
with self.assertRaises(TypeError):
packed_sf['user_id'].unpack()
with self.assertRaises(TypeError):
packed_sf['category'].unpack(1)
with self.assertRaises(TypeError):
packed_sf['category'].unpack(value_types = [int])
# unpack only one column
expected_sf = SFrame()
expected_sf["is_retail"] = sf["is_retail"]
unpacked_sf = packed_sf['category'].unpack(limit=["is_retail"], column_types=[int], column_name_prefix=None)
assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())
# unpack all
unpacked_sf = packed_sf['category'].unpack(column_name_prefix=None, column_types=[int, int, str], limit=["is_restaurant", "is_retail", "is_electronics"])
assert_frame_equal(unpacked_sf.to_dataframe(), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe())
# auto-infer types; the column order may differ, so sort before comparison
unpacked_sf = packed_sf["category"].unpack()
unpacked_sf.rename({
"X.is_restaurant": "is_restaurant",
"X.is_retail": "is_retail",
"X.is_electronics": "is_electronics"
})
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe().sort(axis=1))
unpacked_sf = packed_sf["category"].unpack(na_value = 0, column_name_prefix="new")
expected = SFrame()
expected["new.is_restaurant"] = [1, 1,None,None, 1, None, None]
expected["new.is_retail"] = [None,1,1,None,1, None, None]
expected["new.is_electronics"] = ["yes", "no","yes",None,"no", None, None]
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), expected.to_dataframe().sort(axis=1))
# unpack a dictionary with integers as keys
sa = SArray([
{1: 'a'},
{2: 'b'}
])
result = sa.unpack()
expected = SFrame({'X.1':['a', None], 'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2])
expected = SFrame({'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2], column_name_prefix="expanded")
expected = SFrame({'expanded.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sa = SArray([{i:i} for i in range(500)])
unpacked_sa = sa.unpack()
self.assertEqual(len(unpacked_sa), len(sa))
i = 0
for v in unpacked_sa:
for j in range(500):
val = v['X.' + str(j)]
if (j == i):
self.assertEqual(val, i);
else:
self.assertEqual(val, None);
i = i + 1
# if types don't agree, convert to string automatically
sa = SArray([{'a':1},{'a': 'a_3'}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [str])
sa = SArray([{'a':None}, {'a': 1}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
sa = SArray([{'a':1}, {'a': None}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
# type inference is already done on the server side even if limit is given
sa = SArray([{'c'+str(i): i if i % 2 == 0 else 'v' + str(i)} for i in range(1000)])
unpacked = sa.unpack(limit=['c'+str(i) for i in range(10)], column_name_prefix="")
for i in range(10):
v = unpacked[i]
for j in range(10):
if (j != i):
self.assertEqual(v['c'+str(j)], None)
elif j % 2 == 0:
self.assertEqual(v['c'+str(j)], j)
else:
self.assertEqual(v['c'+str(j)], 'v' + str(j))
def test_unpack_sframe(self):
sf = SFrame()
sf['user_id'] = range(7)
sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
sf['list'] = [
None,
range(1),
range(2),
range(3),
range(1),
range(2),
range(3),
]
with self.assertRaises(TypeError):
sf.unpack('user_id')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list'] = sf['list']
expected["is_restaurant"] = [1, 1,0,0, 1, None, None]
expected["is_retail"] = [None,1,1,None,1, None, None]
expected["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
result = sf.unpack('category')
result.rename({
'category.is_restaurant': 'is_restaurant',
'category.is_retail': 'is_retail',
'category.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="")
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="abc")
result.rename({
'abc.is_restaurant': 'is_restaurant',
'abc.is_retail': 'is_retail',
'abc.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", column_types=[str], limit=['is_restaurant'])
new_expected = expected[['user_id', 'list', 'is_restaurant']]
new_expected['is_restaurant'] = new_expected['is_restaurant'].astype(str)
assert_frame_equal(new_expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", na_value = None)
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,2, None, None,2]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list', na_value= 2)
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,None, None, None,None]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
# auto resolving conflicting names
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [range(5) for i in range(100)]
sf['b.0'] = range(100)
sf['b.0.1'] = range(100)
result = sf.unpack('b')
self.assertEqual(result.column_names(), ['a', 'b.0', 'b.0.1', 'b.0.1.1', 'b.1.1.1', 'b.2.1.1', 'b.3.1.1', 'b.4.1.1'])
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [{'str1': i, 'str2':i + 1} for i in range(100)]
sf['b.str1'] = range(100)
result = sf.unpack('b')
self.assertEqual(len(result.column_names()), 4)
def test_stack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["category"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
{},
None]
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3,4,5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(ValueError):
sf.stack('category', ['user_id', 'value'])
# normal case
stacked_sf = sf.stack('category', ['category', 'value'])
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
# check the inferred column types
stacked_sf = sf.stack('category')
self.assertTrue(stacked_sf.column_types()[2] == str)
self.assertTrue(stacked_sf.column_types()[3] == int)
# auto generate column names
stacked_sf = sf.stack('category')
new_column_names = stacked_sf.column_names()
self.assertTrue(len(new_column_names) == 4)
expected_sf.rename({'category':new_column_names[2], 'value':new_column_names[3]})
df_expected = expected_sf.to_dataframe().sort(['user_id', new_column_names[2]]).reset_index(drop=True)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", new_column_names[2]]).reset_index(drop=True), df_expected)
#dropna
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3, 4, 5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
stacked_sf = sf.stack('category', ['category','value'], drop_na = False)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
sf = SFrame()
sf['a'] = SArray(([{}] * 100) + [{'a':1}])
# it's a dict, so 2 new column types are needed
with self.assertRaises(ValueError):
sf.stack('a',['key', 'value'], new_column_type=[str])
with self.assertRaises(ValueError):
sf.stack('a',['key', 'value'], new_column_type=str)
sf.stack('a',['key', 'value'], new_column_type=[str, int])
expected_sf = SFrame()
expected_sf['key'] = SArray([None] * 100 + ["a"])
expected_sf['value'] = SArray([None] * 100 + [1])
def test_stack_list(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [['a', 'b'], ['c'], ['d'],['e', None], None]
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4,5]
expected_result['X1'] = ['a','b','c','d','e',None, None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4]
expected_result[result.column_names()[1]] = ['a','b','c','d','e',None]
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
sf = SFrame()
n = 1000000
sf['a'] = range(1,n)
sf['b'] = [[str(i), str(i+1)] for i in range(1,n)]
result = sf.stack('b')
self.assertTrue(len(result), n * 2)
sf = SFrame()
sf['a'] = SArray(([[]] * 100) + [['a','b']])
# it's a list, so only one new column type is allowed
with self.assertRaises(ValueError):
sf.stack('a', 'a', new_column_type=[str, int])
sf.stack('a', 'a', new_column_type=str)
expected_sf = SFrame()
expected_sf['a'] = SArray([None] * 100 + ["a", "b"])
def test_stack_vector(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [[1],[1,2],[1,2,3],[1,2,3,4],None]
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4,5]
expected_result['X1'] = [1,1,2,1,2,3,1,2,3,4,None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4]
expected_result[result.column_names()[1]] = SArray([1,1,2,1,2,3,1,2,3,4], float)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
import array
sf = SFrame()
sf['a'] = SArray(([array.array('d')] * 100) + [array.array('d',[1.0,2.0])])
# it's an array, so only one new column type is allowed
with self.assertRaises(ValueError):
sf.stack('a', 'a', new_column_type=[str, int])
sf.stack('a', 'a', new_column_type=int)
expected_sf = SFrame()
expected_sf['a'] = SArray([None] * 100 + [1, 2])
def test_unstack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["categories"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
None]
stacked_sf = sf.stack('categories', ['category', 'value'], drop_na=False)
# normal unstack
unstacked_sf = stacked_sf.unstack(column=['category', 'value'], new_column_name = 'categories')
# these frames are *almost* equal except user4 will be {} instead of None
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing new column name
unstacked_sf = stacked_sf.unstack(['category', 'value'])
self.assertEqual(len(unstacked_sf.column_names()), 3)
unstacked_sf.rename({unstacked_sf.column_names()[2] : 'categories'})
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# non-existent column name
with self.assertRaises(KeyError):
stacked_sf.unstack(['category','value1'])
# wrong input
with self.assertRaises(TypeError):
stacked_sf.unstack(['category'])
# duplicate new column name
with self.assertRaises(RuntimeError):
unstacked_sf = stacked_sf.unstack(['category', 'value'], 'user_name')
def test_unstack_list(self):
sf = SFrame()
sf['a'] = [1,2,3,4]
sf['b'] = [range(10), range(20), range(30), range(50)]
stacked_sf = sf.stack('b', new_column_name = 'new_b')
unstacked_sf = stacked_sf.unstack('new_b', new_column_name = 'b')
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b', new_column_name='b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
with self.assertRaises(RuntimeError):
stacked_sf.unstack('new_b', new_column_name='a')
with self.assertRaises(TypeError):
stacked_sf.unstack(['new_b'])
with self.assertRaises(KeyError):
stacked_sf.unstack('non exist')
def test_content_identifier(self):
sf = SFrame({"a":[1,2,3,4],"b":["1","2","3","4"]})
a1 = sf['a'].__get_content_identifier__()
a2 = sf['a'].__get_content_identifier__()
self.assertEquals(a1, a2)
def test_random_access(self):
t1 = list(range(0,100000))
t2 = [str(i) for i in t1]
t = [{'t1':t1[i], 't2':t2[i]} for i in range(len(t1))];
s = SFrame({'t1':t1,'t2':t2})
# simple slices
self.__test_equal(s[1:10000], pd.DataFrame(t[1:10000]))
self.__test_equal(s[0:10000:3], pd.DataFrame(t[0:10000:3]))
self.__test_equal(s[1:10000:3], pd.DataFrame(t[1:10000:3]))
self.__test_equal(s[2:10000:3], pd.DataFrame(t[2:10000:3]))
self.__test_equal(s[3:10000:101], pd.DataFrame(t[3:10000:101]))
# negative slices
self.__test_equal(s[-5:], pd.DataFrame(t[-5:]))
self.__test_equal(s[-1:], pd.DataFrame(t[-1:]))
self.__test_equal(s[-100:-10], pd.DataFrame(t[-100:-10]))
self.__test_equal(s[-100:-10:2], pd.DataFrame(t[-100:-10:2]))
# single element reads
self.assertEqual(s[511], t[511])
self.assertEqual(s[1912],t[1912])
self.assertEqual(s[-1], t[-1])
self.assertEqual(s[-10],t[-10])
# edge case oddities
self.__test_equal(s[10:100:100], pd.DataFrame(t[10:100:100]))
self.__test_equal(s[-100:len(s):10], pd.DataFrame(t[-100:len(t):10]))
self.assertEqual(len(s[-1:-2]), 0)
self.assertEqual(len(s[-1:-1000:2]), 0)
with self.assertRaises(IndexError):
s[len(s)]
def sort_n_rows(self, nrows=100):
nrows += 1
sf = SFrame()
sf['a'] = range(1, nrows)
sf['b'] = [float(i) for i in range(1,nrows)]
sf['c'] = [str(i) for i in range(1,nrows)]
sf['d'] = [[i, i+1] for i in range(1,nrows)]
reversed_sf = SFrame()
reversed_sf['a'] = range(nrows-1, 0, -1)
reversed_sf['b'] = [float(i) for i in range(nrows-1, 0, -1)]
reversed_sf['c'] = [str(i) for i in range(nrows-1, 0, -1)]
reversed_sf['d'] = [[i, i+1] for i in range(nrows-1, 0, -1)]
with self.assertRaises(TypeError):
sf.sort()
with self.assertRaises(TypeError):
sf.sort(1)
with self.assertRaises(TypeError):
sf.sort("d")
with self.assertRaises(ValueError):
sf.sort("nonexist")
with self.assertRaises(TypeError):
sf.sort({'a':True})
result = sf.sort('a')
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
# try a lazy input
result = sf[sf['a'] > 10].sort('a')
assert_frame_equal(sf[sf['a'] > 10].to_dataframe(), result.to_dataframe());
result = sf.sort('a', ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# lazy reversed
result = sf[sf['a'] > 10].sort('a', ascending = False)
assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe());
# lazy reversed
result = sf[sf['a'] > 10].sort('a', ascending = False)
assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe());
# sort two columns
result = sf.sort(['a', 'b'])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort(['a', 'c'], ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', True), ('b', False)])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', False), ('b', True)])
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# empty sort should not throw
sf = SFrame({'x':[]})
sf.sort('x')
def test_sort(self):
#self.sort_n_rows(100)
for i in range(1, 10):
self.sort_n_rows(i)
def test_dropna(self):
# empty case
sf = SFrame()
self.assertEquals(len(sf.dropna()), 0)
# normal case
self.__test_equal(self.employees_sf.dropna(), self.employees_sf[0:5].to_dataframe())
test_split = self.employees_sf.dropna_split()
self.__test_equal(test_split[0], self.employees_sf[0:5].to_dataframe())
self.__test_equal(test_split[1], self.employees_sf[5:6].to_dataframe())
# create some other test sframe
test_sf = SFrame({'ints':SArray([None,None,3,4,None], int),
'floats':SArray([np.nan,2.,3.,4.,np.nan],float),
'strs':SArray(['1',np.nan,'','4',None], str),
'lists':SArray([[1],None,[],[1,1,1,1],None], list),
'dicts':SArray([{1:2},{2:3},{},{4:5},None], dict)})
# another normal, but more interesting case
self.__test_equal(test_sf.dropna(),
pd.DataFrame({'ints':[3,4],'floats':[3.,4.],'strs':['','4'],'lists':[[],[1,1,1,1]],'dicts':[{},{4:5}]}))
test_split = test_sf.dropna_split()
self.__test_equal(test_split[0], test_sf[2:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:2].append(test_sf[4:5]).to_dataframe())
# the 'all' case
self.__test_equal(test_sf.dropna(how='all'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(how='all')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# select some columns
self.__test_equal(test_sf.dropna(['ints','floats'], how='all'), test_sf[1:4].to_dataframe())
test_split = test_sf.dropna_split(['ints','floats'], how='all')
self.__test_equal(test_split[0], test_sf[1:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:1].append(test_sf[4:5]).to_dataframe())
self.__test_equal(test_sf.dropna('strs'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split('strs')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
self.__test_equal(test_sf.dropna(['strs','dicts']), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(['strs','dicts'])
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# bad stuff
with self.assertRaises(TypeError):
test_sf.dropna(1)
test_sf.dropna([1,2])
test_sf.dropna('strs', how=1)
test_sf.dropna_split(1)
test_sf.dropna_split([1,2])
test_sf.dropna_split('strs', how=1)
with self.assertRaises(ValueError):
test_sf.dropna('ints', how='blah')
test_sf.dropna_split('ints', how='blah')
with self.assertRaises(RuntimeError):
test_sf.dropna('dontexist')
test_sf.dropna_split('dontexist')
def test_add_row_number(self):
sf = SFrame(self.__create_test_df(400000))
sf = sf.add_row_number('id')
self.assertEquals(list(sf['id']), list(range(0,400000)))
del sf['id']
sf = sf.add_row_number('id', -20000)
self.assertEquals(list(sf['id']), list(range(-20000,380000)))
del sf['id']
sf = sf.add_row_number('id', 40000)
self.assertEquals(list(sf['id']), list(range(40000,440000)))
with self.assertRaises(RuntimeError):
sf.add_row_number('id')
with self.assertRaises(TypeError):
sf = sf.add_row_number(46)
sf = sf.add_row_number('id2',start='hi')
def test_check_lazy_sframe_size(self):
# empty sframe, materialized, has_size
sf = SFrame()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# add one column, materialized, has_size
sf['a'] = range(1000)
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# materialize it, materialized, has_size
sf['a'] = range(1000)
sf.__materialize__()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# logical filter, not materialized, not has_size
sf = sf[sf['a'] > 5000]
self.assertFalse(sf.__is_materialized__())
self.assertFalse(sf.__has_size__())
def test_lazy_logical_filter_sarray(self):
g=SArray(range(10000))
g2=SArray(range(10000))
a=g[g>10]
a2=g2[g>10]
z=a[a2>20]
self.assertEqual(len(z), 9979)
def test_lazy_logical_filter_sframe(self):
g=SFrame({'a':range(10000)})
g2=SFrame({'a':range(10000)})
a=g[g['a']>10]
a2=g2[g['a']>10]
z=a[a2['a']>20]
self.assertEqual(len(z), 9979)
def test_sframe_to_rdd(self):
if not HAS_PYSPARK:
print("Did not run Pyspark unit tests!")
return
sc = SparkContext('local')
# Easiest case: single column of integers
test_rdd = sc.parallelize(range(100))
sf = SFrame.from_rdd(test_rdd)
self.assertTrue(sf.num_cols(), 1)
self.assertTrue(sf.column_names(), ['X1'])
# We cast integers to floats to be safe on varying types
self.assertEquals([float(i) for i in range(0,100)], list(sf['X1']))
sc.stop()
def test_rdd_to_sframe(self):
if not HAS_PYSPARK:
print("Did not run Pyspark unit tests!")
return
sc = SparkContext('local')
# Easiest case: single column of integers
sf = SFrame({'column_name':range(100)})
test_rdd = sf.to_rdd(sc)
res = test_rdd.collect()
self.assertEquals(res, [{'column_name':long(i)} for i in range(100)])
sc.stop()
def test_column_manipulation_of_lazy_sframe(self):
g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
del g['id']
# if lazy column deletion is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
g.swap_columns('a','id')
# if lazy column swap is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
def test_empty_sarray(self):
with util.TempDirectory() as f:
sf = SArray()
sf.save(f)
sf2 = SArray(f)
self.assertEquals(len(sf2), 0)
def test_empty_sframe(self):
with util.TempDirectory() as f:
sf = SFrame()
sf.save(f)
sf2 = SFrame(f)
self.assertEquals(len(sf2), 0)
self.assertEquals(sf2.num_columns(), 0)
def test_none_column(self):
sf = SFrame({'a':[1,2,3,4,5]})
sf['b'] = None
self.assertEqual(sf['b'].dtype(), float)
df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [None,None,None,None,None]})
self.__test_equal(sf, df)
sa = SArray.from_const(None, 100)
self.assertEquals(list(sa), [None] * 100)
self.assertEqual(sa.dtype(), float)
def test_apply_with_partial(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
def concat_fn(character, row):
return '%s%d' % (character, row['a'])
my_partial_fn = functools.partial(concat_fn, 'x')
sa = sf.apply(my_partial_fn)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, row):
return '%s%d' % (self.character, row['a'])
concatenator = Concatenator('x')
sa = sf.apply(concatenator)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_save_sframe(self):
'''save lazily evaluated SFrame should not materialize to target folder
'''
data = SFrame()
data['x'] = range(100)
data['x'] = data['x'] > 50
#lazy and good
tmp_dir = tempfile.mkdtemp()
data.save(tmp_dir)
shutil.rmtree(tmp_dir)
print(data)
def test_empty_argmax_does_not_fail(self):
# an empty argmax should not result in a crash
sf = SFrame({'id': [0, 0, 0, 1, 1, 2, 2],
'value': [3.0, 2.0, 2.3, None, None, 4.3, 1.3],
'category': ['A', 'B', 'A', 'E', 'A', 'A', 'B']})
sf.groupby('id', aggregate.ARGMAX('value', 'category'))
def test_cache_invalidation(self):
# Changes to the SFrame should invalidate the indexing cache.
X = SFrame({'a' : range(4000),
'b' : range(4000)})
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : i, 'b' : i})
X['a'] = range(1000, 5000)
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'b' : i})
del X['b']
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i})
X['b'] = X['a']
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'b' : 1000 + i})
X.rename({'b' : 'c'})
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'c' : 1000 + i})
def test_to_numpy(self):
X = SFrame({'a' : range(100),
'b' : range(100)})
import numpy as np
import numpy.testing as nptest
Y = np.transpose(np.array([range(100), range(100)]))
nptest.assert_array_equal(X.to_numpy(), Y)
X['b'] = X['b'].astype(str)
s = [str(i) for i in range(100)]
Y = np.transpose(np.array([s, s]))
nptest.assert_array_equal(X.to_numpy(), Y)
@mock.patch(__name__+'.sqlite3.Cursor', spec=True)
@mock.patch(__name__+'.sqlite3.Connection', spec=True)
def test_from_sql(self, mock_conn, mock_cursor):
# Set up mock connection and cursor
conn = mock_conn('example.db')
curs = mock_cursor()
conn.cursor.return_value = curs
sf_type_codes = [44,44,41,22,114,199,43]
sf_data = list(zip(*self.all_type_cols))
sf_iter = sf_data.__iter__()
def mock_fetchone():
try:
return next(sf_iter)
except StopIteration:
return None
def mock_fetchmany(size=1):
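# emulate DBAPI fetchmany on the shared iterator: return at most curs.arraysize rows (this mock ignores the size argument)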
count = 0
ret_list = []
for i in sf_iter:
if count == curs.arraysize:
break
ret_list.append(i)
count += 1
return ret_list
curs.fetchone.side_effect = mock_fetchone
curs.fetchmany.side_effect = mock_fetchmany
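# build a DBAPI-style cursor.description: a 7-item sequence per column, with (name, type_code) filled in and the remaining fields left as None placeholders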
curs.description = [['X'+str(i+1),sf_type_codes[i]]+[None for j in range(5)] for i in range(len(sf_data[0]))]
# bigger than cache, no Nones
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, self.sf_all_types)
# smaller than cache, no Nones
sf_iter = sf_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=100, dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, self.sf_all_types)
none_col = [None for i in range(5)]
nones_in_cache = list(zip(*[none_col for i in range(len(sf_data[0]))]))
none_sf = SFrame({'X'+str(i):none_col for i in range(1,len(sf_data[0])+1)})
test_data = (nones_in_cache+sf_data)
sf_iter = test_data.__iter__()
# more None rows than cache & types in description
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
sf_inferred_types = SFrame()
expected_types = [float,float,str,str,str,str,dt.datetime]
for i in zip(self.sf_all_types.column_names(),expected_types):
new_col = SArray(none_col).astype(i[1])
new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: i[1](x) if i[1] is not dt.datetime else x))
sf_inferred_types.add_column(new_col)
# Don't test the string representation of dict and list; there are
# funky consistency issues with the string representations of these
sf.remove_columns(['X5', 'X6'])
sf_inferred_types.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
# more None rows than cache & no type information
for i in range(len(curs.description)):
curs.description[i][1] = None
sf_iter = test_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
sf_inferred_types = SFrame()
expected_types = [str for i in range(len(sf_data[0]))]
for i in zip(self.sf_all_types.column_names(),expected_types):
new_col = SArray(none_col).astype(i[1])
new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: str(x)))
sf_inferred_types.add_column(new_col)
# Don't test the string representation of dict, could be out of order
sf.remove_columns(['X5', 'X6'])
sf_inferred_types.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
### column_type_hints tests
sf_iter = test_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5,
dbapi_module=dbapi2_mock(), column_type_hints=str)
sf.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
# Provide unhintable types
sf_iter = test_data.__iter__()
expected_types = [int,float,str,array.array,list,dict,dt.datetime]
with self.assertRaises(TypeError):
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=5,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
sf_iter = test_data.__iter__()
expected_types = {'X'+str(i+1):expected_types[i] for i in range(3)}
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=10,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
_assert_sframe_equal(sf[5:],self.sf_all_types)
# Test a float forced to a str
sf_iter = test_data.__iter__()
expected_types['X2'] = str
self.sf_all_types['X2'] = self.sf_all_types['X2'].apply(lambda x: str(x))
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=10,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
_assert_sframe_equal(sf[5:],self.sf_all_types)
# Type unsupported by sframe
curs.description = [['X1',44],['X2',44]]
sf_iter = [[complex(4.5,3),1], [complex(3.4,5),2]].__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table")
expected_sf = SFrame({'X1':["(4.5+3j)","(3.4+5j)"],'X2':[1,2]})
_assert_sframe_equal(sf, expected_sf)
# bad DBAPI version!
bad_version = dbapi2_mock()
bad_version.apilevel = "1.0 "
with self.assertRaises(NotImplementedError):
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=bad_version)
# Bad module
with self.assertRaises(AttributeError):
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=os)
# Bad connection
with self.assertRaises(AttributeError):
sf = SFrame.from_sql(4, "SELECT * FROM test_table")
# Empty query result
curs.description = []
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, SFrame())
@mock.patch(__name__+'.sqlite3.Cursor', spec=True)
@mock.patch(__name__+'.sqlite3.Connection', spec=True)
def test_to_sql(self, mock_conn, mock_cursor):
conn = mock_conn('example.db')
curs = mock_cursor()
insert_stmt = "INSERT INTO ins_test (X1,X2,X3,X4,X5,X6,X7) VALUES ({0},{1},{2},{3},{4},{5},{6})"
num_cols = len(self.sf_all_types.column_names())
test_cases = [
('qmark',insert_stmt.format(*['?' for i in range(num_cols)])),
('numeric',insert_stmt.format(*[':'+str(i) for i in range(1,num_cols+1)])),
('named',insert_stmt.format(*[':X'+str(i) for i in range(1,num_cols+1)])),
('format',insert_stmt.format(*['%s' for i in range(num_cols)])),
('pyformat',insert_stmt.format(*['%(X'+str(i)+')s' for i in range(1,num_cols+1)])),
]
for i in test_cases:
conn.cursor.return_value = curs
mock_mod = dbapi2_mock()
mock_mod.paramstyle = i[0]
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=mock_mod)
conn.cursor.assert_called_once_with()
calls = []
col_names = self.sf_all_types.column_names()
for j in self.sf_all_types:
if i[0] == 'named' or i[0] == 'pyformat':
calls.append(mock.call(i[1],j))
else:
calls.append(mock.call(i[1],[j[k] for k in col_names]))
curs.execute.assert_has_calls(calls, any_order=False)
self.assertEquals(curs.execute.call_count, len(self.sf_all_types))
conn.commit.assert_called_once_with()
curs.close.assert_called_once_with()
conn.reset_mock()
curs.reset_mock()
# bad DBAPI version!
bad_version = dbapi2_mock()
bad_version.apilevel = "1.0 "
with self.assertRaises(NotImplementedError):
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=bad_version)
# bad paramstyle
bad_paramstyle = dbapi2_mock()
bad_paramstyle.paramstyle = 'foo'
with self.assertRaises(TypeError):
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=bad_paramstyle)
def test_materialize(self):
sf = SFrame({'a':range(100)})
sf = sf[sf['a'] > 10]
self.assertFalse(sf.is_materialized())
sf.materialize()
self.assertTrue(sf.is_materialized())
def test_materialization_slicing(self):
# Has been known to fail.
g=SFrame({'a':range(100)})[:10]
g['b'] = g['a'] + 1
g['b'].materialize()
g.materialize()
def test_copy(self):
from copy import copy
sf = generate_random_sframe(100, "Cns")
sf_copy = copy(sf)
assert sf is not sf_copy
_assert_sframe_equal(sf, sf_copy)
def test_deepcopy(self):
from copy import deepcopy
sf = generate_random_sframe(100, "Cns")
sf_copy = deepcopy(sf)
assert sf is not sf_copy
_assert_sframe_equal(sf, sf_copy)
if __name__ == "__main__":
import sys
# Check if we are supposed to connect to another server
for i, v in enumerate(sys.argv):
if v.startswith("ipc://"):
_launch(v)
# The rest of the arguments need to get passed through to
# the unittest module
del sys.argv[i]
break
unittest.main()
| bsd-3-clause |
NuGrid/NuGridPy | nugridpy/grain.py | 1 | 61226 | '''
grain.py is a collection of routines to analyze presolar grain data
(and can probably be extended to observational data at a later stage).
This class loads the current version of the presolar grain database for
further processing. A private database can be given as well, as
described later. Several routines (see below and NuGrid book) can be
used to filter, plot, and retrieve grain data. The presolar grain
database is supported by the group at Washington University, mainly
Frank Gyngard. The database can be found at
http://presolar.wustl.edu/PGD/Presolar_Grain_Database.html
Important note: This script assumes that you have a full SVN tree
checked out (or actually, that you have at least the utils folder and
the validation folder checked out on the same level).
Usage of these tools
====================
For questions, bug reports, please contact [email protected]
Reto Trappitsch for the NuGrid collaboration
Developing notes
================
data headers should always be saved lowercase -> therefore also use
.lower() when comparing with user input!
If updating the database with new files creates a UTF-16 error while
reading in the database, open it in Excel, save the file, and try
again. If this does not work, good luck...
'''
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import input
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import sys
import xlrd
from .utils import *
from .data_plot import *
### find the path to the tree ###
class gdb(DataPlot, Utils):
'''
This class provides easy access to the presolar grain database, as
described in the header. The database is read in by default; however,
you can choose a private database and, if you do so, decide whether
you want to use the private database exclusively or together with the
whole database.
If you use a private datafile, make sure it has the same
structure as the presolar grain database. The most important
thing is that you have a column named 'Notes' before the data
start. Everything to the right of that column is data, everything
to the left is a description of the data. You don't have to have
all the data columns if you don't have data, but the headers of
the data columns need to be exactly the same as in the database.
Look at the validation/grain_data xls files if you want to see an
example of the formatting, or use them as a template.
Parameters
----------
fname : string
Filename of your private database; if it is not in the main tree
structure with the other databases, give the full path.
gdbdir : string
In case you do not have a full SVN tree installed, give the correct
path to all the data here.
gdbload : boolean, optional
True or False: Do you want to load the grain database or not?
The default is True.
iniabufile : string, optional
Which initial abundances should be used to calculate delta
values. Here we assume complete SVN tree (need USEEPP). Give
absolute path otherwise. The default is 'frames/mppnp/USEEPP/iniab2.0E-02GN93.ppn'.
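Examples
--------
A minimal usage sketch, assuming the nugridpy package layout this
module lives in and the default database and SVN paths described
above; the graintype value is only an illustration of the calls
defined in this class:
>>> from nugridpy.grain import gdb
>>> g = gdb()
>>> g.info()
>>> g.filter_desc(graintype='M')
>>> g.reset_filter()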
'''
def __init__(self, fname=None, gdbdir=None, gdbload=True,
iniabufile='frames/mppnp/USEEPP/iniab2.0E-02GN93.ppn'):
print('Reading in... this takes a little bit')
if iniabufile[0] != '/':
iniabufile = get_svnpath() + iniabufile
# grab data
header_desc, header_data, desc, data = preprocessor(fname,gdbdir,gdbload)
# make dictionary
descdict = dict(list(zip(header_desc,list(range(len(header_desc))))))
datadict = dict(list(zip(header_data,list(range(len(header_data))))))
# style definer
header_style, style = style_creator(desc,descdict)
styledict = dict(list(zip(header_style,list(range(len(header_style))))))
# make private instances w/ all the data
self._header_desc = header_desc
self._header_data = header_data
self._header_style = header_style
self._desc = desc
self._data = data
self._style = style
self._descdict = descdict
self._datadict = datadict
self._styledict = styledict
# make the working data
self.header_desc = header_desc
self.header_data = header_data
self.header_style = header_style
self.desc = desc
self.data = data
self.style = style
self.descdict = descdict
self.datadict = datadict
self.styledict = styledict
self.inut = iniabu(iniabufile)
def __del__(self):
print('Presolar grain database available at: http://presolar.wustl.edu/PGD/Presolar_Grain_Database.html')
def reset_filter(self):
'''
Resets the filter and goes back to the initialized values. This
routine also resets the style if you have changed it.
'''
self.header_desc = self._header_desc
self.header_data = self._header_data
self.header_style = self._header_style
self.desc = self._desc
self.data = self._data
self.style = self._style
self.descdict = self._descdict
self.datadict = self._datadict
self.styledict = self._styledict
def info(self, graintype=True, group=True, reference=False,
phase=True):
'''
This routine gives you information on what kind of grains are
currently available in your filtered version. It lists the types
of grains available. More to be implemented upon need.
Parameters
----------
graintype, group, reference, phase : boolean
What do you want to print for information. There can be a
lot of references, hence the reference default is False.
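A short call sketch (i is an instance of this class; the printed
output depends on the currently filtered data):
>>> i.info(graintype=True, group=False, reference=False, phase=False)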
'''
# create a list with all graintypes
gtype_info = []
group_info = []
ref_info = []
phase_info = []
# how many grains in database
print('There are ' + str(len(self.data)) + ' grains in your database.\n')
# graintypes
if graintype:
for i in range(len(self.desc)):
gtype_tmp = self.desc[i][self.descdict['Type']]
wrtchk = True
for j in range(len(gtype_info)):
if gtype_info[j] == gtype_tmp:
wrtchk = False
break
if wrtchk:
gtype_info.append(gtype_tmp)
print('Available graintypes are:')
print('-------------------------')
print(gtype_info)
# groups
if group:
for i in range(len(self.desc)):
group_tmp = self.desc[i][self.descdict['Group']]
wrtchk = True
for j in range(len(group_info)):
if group_info[j] == group_tmp:
wrtchk = False
break
if wrtchk:
group_info.append(group_tmp)
print('\nAvailable groups of grains (for silicates and oxides) are:')
print('----------------------------------------------------------')
print(group_info)
# Phases
if phase:
for i in range(len(self.desc)):
phase_tmp = self.desc[i][self.descdict['Phase']]
wrtchk = True
for j in range(len(phase_info)):
if phase_info[j] == phase_tmp:
wrtchk = False
break
if wrtchk:
phase_info.append(phase_tmp)
print('\nAvailable Phases of grains are:')
print('----------------------------------------------------------')
print(phase_info)
# references
if reference:
for i in range(len(self.desc)):
ref_tmp = self.desc[i][self.descdict['Reference']]
wrtchk = True
for j in range(len(ref_info)):
if ref_info[j] == ref_tmp:
wrtchk = False
break
if wrtchk:
ref_info.append(ref_tmp)
print('\nReferences for grains:')
print('----------------------')
print(ref_info)
def filter_desc(self, graintype=None, group=None, reference=None,
size=None, phase=None):
'''
This routine is to filter for description elements. You can
check what is available in the description by running,
>>> i.header_desc()
where i is the instance you loaded.
You can run the filter multiple times! You can filter for the
following types:
Parameters
----------
graintype : string or list
Give graintypes either as a single string, e.g. 'M' for
mainstream only, or as a list for more than one, e.g. ['M','Z'].
group : integer or list
Group of graintypes, important for oxides and silicates,
since they are split into groups and not into types.
Example 1, or give a list [1,3].
reference : string or list
Give the reference you want to filter for; try i.info()
to pick the right name! You can select a single
reference as a string or multiple references as a list.
size : string
Filter for grain sizes; give '<5.0' or '>5.0' as a string
for smaller or larger than a given grain size in um. Only
data with known grain sizes are chosen. Often grain sizes
are given as a times b, where a and b are the minimum and
maximum measurements from an image. If you give a >5.0
then grains with the smaller dimension >5um are taken into
account. If you want <5.0 then grains with the upper
dimension <5um are taken into account.
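Example of chaining filters (the graintype and size values are
hypothetical and only illustrate the expected format; i is an
instance of this class):
>>> i.filter_desc(graintype=['M','Z'])
>>> i.filter_desc(size='>1.0')
>>> i.reset_filter()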
'''
# filter for graintype
if graintype != None:
indexing = []
# index file on which lines to pick
if type(graintype) == str:
graintype = [graintype]
# filter
for typ in graintype:
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Type']] == typ:
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for phase
if phase != None:
indexing = []
# index file on which lines to pick
if type(phase) == str:
phase = [phase]
# filter
for typ in phase:
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Phase']] == typ:
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for group (oxides and silicates)
if group != None:
indexing = []
# index file on which lines to pick
if type(group) != list:
group = [group]
# filter
for grp in group:
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Group']] == str(int(grp)):
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for reference
if reference != None:
indexing = []
# index file on which lines to pick
if type(reference) != list:
reference = [reference]
# filter
for ri in range(len(reference)):
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Reference']] == reference[ri]:
indexing.append(i)
# filter:
self._filter_desc(indexing)
# filter for grainzise
if size != None:
indexing = []
# index file on which lines to pick
# filter
operator = size[0:1]
size = float(size[1:len(size)])
for i in range(len(self.desc)):
if self.desc[i][self.descdict['Size (microns)']] != '':
try:
# print self.desc[i][self.descdict['Size (microns)']]
comperator1 = self.desc[i][self.descdict['Size (microns)']].split('x')[0]
comperator2 = self.desc[i][self.descdict['Size (microns)']].split('x')[1]
comperator = [float(comperator1),float(comperator2)]
if operator == '<':
comperator = np.min(comperator)
else:
comperator = np.max(comperator)
                except (IndexError, AttributeError):
try:
comperator = float(self.desc[i][self.descdict['Size (microns)']])
except ValueError:
continue
if operator == '>':
if comperator > size:
indexing.append(i)
elif operator == '<':
if comperator < size:
indexing.append(i)
else:
continue
# filter:
self._filter_desc(indexing)
def _filter_desc(self, indexing):
'''
Private function to filter data, goes with filter_desc
'''
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
style_tmp= np.zeros((len(indexing),len(self.header_style)),dtype='|S1024')
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[indexing[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp
else:
print('No filter selected or no data found!')
def filter_single_grain(self):
'''
This subroutine is to filter out single grains. It is kind of
        useless if you have tons of data still in the list. To narrow
        the list down first, you have other filters (filter_desc and
        filter_data) available! This filter gives an index to every
        grain, prints the most important information, and then asks
        you to pick the grains to keep. No arguments are needed; the
        input is given interactively during the routine.
'''
my_index = 0
        my_grains = [['Index','Label','Type','Group','Meteorite','Mineralogy','C12/C13','d(Si29/Si28)','d(Si30/Si28)']]
# add the data to this grain list
for it in range(len(self.data)):
my_grains.append([my_index,self.desc[it][self.descdict['Grain Label']], self.desc[it][self.descdict['Type']], self.desc[it][self.descdict['Group']], self.desc[it][self.descdict['Meteorite']], self.desc[it][self.descdict['Mineralogy']], self.data[it][self.datadict['12c/13c']], self.data[it][self.datadict['d(29si/28si)']], self.data[it][self.datadict['d(30si/28si)']]])
my_index += 1
for prt_line in my_grains:
print(prt_line)
# now write the selector for the index of the grains to select which one should be
# available and which ones should be dumped
usr_input = ''
        usr_input = input('Select the grains by index that you want to use. Please separate the indices by a comma, e.g., 1 or 0,2,3,4\n')
# process user index
if usr_input == '':
print('No data selected to filter.')
return None
elif len(usr_input) == 1:
usr_index = [usr_input]
else:
usr_index = usr_input.split(',')
for it in range(len(usr_index)):
usr_index[it] = int(usr_index[it])
# filter
desc_tmp = np.zeros((len(usr_index),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(usr_index),len(self.header_data)))
style_tmp= np.zeros((len(usr_index),len(self.header_style)),dtype='|S1024')
for i in range(len(usr_index)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[usr_index[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[usr_index[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[usr_index[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp
def filter_data(self, isos, limit, delta=True):
'''
This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space.
Parameters
----------
isos : list
isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
what do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
        delta : boolean, optional
            Set to True to filter in delta space, otherwise set to
            False to filter in ratio space. The default is True.
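        Example (illustrative, assuming i is your loaded instance):
        >>> i.filter_data(['Si-29', 'Si-28'], '>100.')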
'''
# check availability
dat_index, delta_b, ratio_b = self.check_availability(isos)
if dat_index == -1:
print('Isotopes selected are not available. Check i.datadict (where i is your instance) for availability of isotopes.')
return None
# select if larger or smaller and define limit
if limit[0:1] == '>':
comperator = 'gt'
elif limit[0:1] == '<':
comperator = 'st'
else:
            print('Comparator not specified. Limit must be given as \'>5.\' for example.')
return None
try:
limit = float(limit[1:len(limit)])
except ValueError:
print('Limit must be given as \'>5.\' for example.')
return None
        # now calculate the actual limit to compare with, depending on whether the stored value is a delta or a (possibly inverted) ratio
if delta == delta_b: # input and available same
if ratio_b: # one over
if delta:
tmp = self.delta_to_ratio(isos,limit,oneover=True)
comp_lim = self.ratio_to_delta(isos,tmp) # check
else:
comp_lim = old_div(1.,limit) # check
else: # all fine
comp_lim = limit
else: # input and availability not the same
if ratio_b: # one over
if delta: # delta given, ratio one over wanted
comp_lim = self.delta_to_ratio(isos,limit,oneover=True)
else: # ratio given, delta one over wanted
comp_lim = self.ratio_to_delta(isos,limit,oneover=True)
else: # not one over
if delta: # delta given, ratio wanted
comp_lim = self.delta_to_ratio(isos,limit)
else:
comp_lim = self.ratio_to_delta(isos,limit)
# indexing vector
indexing = []
for i in range(len(self.data)):
dat_val = self.data[i][dat_index]
if comperator == 'st':
if dat_val < comp_lim:
indexing.append(i)
else:
if dat_val > comp_lim:
indexing.append(i)
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
self.desc = desc_tmp
self.data = data_tmp
else:
print('No filter selected!')
def filter_uncertainty(self, isos, limit, delta=True):
'''
This subroutine filters isotopic values according to the limit
you give. You can filter in ratio or in delta space. This
routine is based on the uncertainties, e.g., if you want to
select only high quality data.
Parameters
----------
isos : list
Isotopes you want to filter for, e.g., give as
['Si-28', 'Si-30'] for the 28/30 ratio.
limit : string
What do you want to filter for, e.g., ratio or delta > 100,
then give '>100'.
delta : boolean, optional
            Set to True to filter in delta space, otherwise set to
            False to filter in ratio space. The default is True.
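        Example (illustrative): keep only grains with an uncertainty
        below 50 permil on the delta value:
        >>> i.filter_uncertainty(['Si-29', 'Si-28'], '<50.')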
'''
# check availability
dat_index, delta_b, ratio_b = self.check_availability(isos)
if dat_index == -1:
print('Isotopes selected are not available. Check i.datadict (where i is your instance) for availability of isotopes.')
return None
# select if larger or smaller and define limit
if limit[0:1] == '>':
comperator = 'gt'
elif limit[0:1] == '<':
comperator = 'st'
else:
            print('Comparator not specified. Limit must be given as \'>5.\' for example.')
return None
try:
limit = float(limit[1:len(limit)])
except ValueError:
print('Limit must be given as \'>5.\' for example.')
return None
        # now calculate the actual limit to compare with, depending on whether the stored value is a delta or a (possibly inverted) ratio
if delta == delta_b: # input and available same
if ratio_b: # one over
if delta:
tmp = self.delta_to_ratio(isos,limit,oneover=True)
comp_lim = self.ratio_to_delta(isos,tmp) # check
else:
comp_lim = old_div(1.,limit) # check
else: # all fine
comp_lim = limit
else: # input and availability not the same
if ratio_b: # one over
if delta: # delta given, ratio one over wanted
comp_lim = self.delta_to_ratio(isos,limit,oneover=True)
else: # ratio given, delta one over wanted
comp_lim = self.ratio_to_delta(isos,limit,oneover=True)
else: # not one over
if delta: # delta given, ratio wanted
comp_lim = self.delta_to_ratio(isos,limit)
else:
comp_lim = self.ratio_to_delta(isos,limit)
# indexing vector
indexing = []
for i in range(len(self.data)):
dat_val = self.data[i][dat_index+1]
if comperator == 'st':
if dat_val < comp_lim:
indexing.append(i)
else:
if dat_val > comp_lim:
indexing.append(i)
# now filter data
if len(indexing) > 0:
desc_tmp = np.zeros((len(indexing),len(self.header_desc)),dtype='|S1024')
data_tmp = np.zeros((len(indexing),len(self.header_data)))
for i in range(len(indexing)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[indexing[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[indexing[i]][k]
self.desc = desc_tmp
self.data = data_tmp
else:
print('No filter selected!')
def style_chg_label(self,type,symb=None,edc=None,fac=None,smbsz=None,edw=None,lab=None):
'''
This routine changes the plotting style that is set by default.
        The style is changed according to the label that you choose.
Changing according to reference, use style_chg_ref() function!
You can change it back to default by resetting the filter using
g.reset_filter() routine, assuming that g is your instance. The
format that is used here is:
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
        You can see the current styles by running
        g.style
        Attention: You have to give values to all variables that are
        compatible with the python matplotlib. If not, it's your fault
        if nothing works.
Parameters
----------
type : string
Select the label of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change. Watch out, if you
want to do more specifications later, the type will
have changed to the new label.
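        Example (illustrative, label names come from the default styles):
        >>> g.style_chg_label('SiC M', symb='o', edc='r', fac='r')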
'''
# do stuff for selected type
for i in range(len(self.style)):
# check if type is correct, otherwise continue directly
if self.style[i][self.styledict['Label']] == type:
# change symbol:
if symb != None:
self.style[i][self.styledict['Symbol']] = symb
# change edge color
if edc != None:
self.style[i][self.styledict['Edge color']] = edc
# change face color
if fac != None:
self.style[i][self.styledict['Face color']] = fac
# change symbol size
if smbsz != None:
self.style[i][self.styledict['Symbol size']] = smbsz
# change edge width
if edw != None:
self.style[i][self.styledict['Edge width']] = edw
# change label
if lab != None:
self.style[i][self.styledict['Label']] = lab
def style_chg_ref(self,ref,symb=None,edc=None,fac=None,smbsz=None,edw=None,lab=None):
'''
This routine changes the plotting style that is set by default.
        The style is changed according to the reference of the paper
        as given in the grain database. For changes according to the
        type of grain, use the routine style_chg_label().
['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width'
,'Label']
        You can see the current styles by running
        g.style
        Attention: You have to give values to all variables that are
        compatible with the python matplotlib. If not, it's your fault
        if nothing works.
Parameters
----------
ref : string
Select the reference of the grains you want to change.
symb : string, optional
Select new symbol. None for no change.
edc : string, optional
Select new edge color. None for no change.
fac : string, optional
Select new face color. None for no change.
smbsz : string, optional
Select new symbol size. None for no change.
edw : string, optional
Select new edge width. None for no change.
lab : string, optional
Select new label. None for no change.
'''
# do stuff for selected reference
for i in range(len(self.style)):
# check if reference is correct, otherwise continue directly
if self.desc[i][self.descdict['Reference']] == ref:
# change symbol:
if symb != None:
self.style[i][self.styledict['Symbol']] = symb
# change edge color
if edc != None:
self.style[i][self.styledict['Edge color']] = edc
# change face color
if fac != None:
self.style[i][self.styledict['Face color']] = fac
# change symbol size
if smbsz != None:
self.style[i][self.styledict['Symbol size']] = smbsz
# change edge width
if edw != None:
self.style[i][self.styledict['Edge width']] = edw
# change label
if lab != None:
self.style[i][self.styledict['Label']] = lab
##### PLOTTING PREPARATOR #####
def plot_ratio_return(self, isox, isoy, deltax=True, deltay=True):
'''
        This routine returns isotopic data to plot from the filtered
        list of data.
Parameters
----------
isox : list
Isotopes for x axis in standard format ['Si-28','Si-30'].
isoy : list
Same as isox but for y axis.
deltax : boolean, optional
If true then x-axis values are in delta format. The default
is True.
deltay : boolean, optional
Same as for x-axis but for y-axis. The default is True.
Returns
-------
grpl_xdata
grain plot x-axis data.
grpl_xerr
x-axis error bars.
grpl_ydata
grain plot y-axis data.
grpl_yerr
y-axis error bars.
grpl_style
style data for the different symbols.
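        Example (illustrative):
        >>> xdat, xerr, ydat, yerr, style = g.plot_ratio_return(['Si-29','Si-28'], ['Si-30','Si-28'])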
'''
# check availability
index_x, delta_b_x, ratio_b_x = self.check_availability(isox)
index_y, delta_b_y, ratio_b_y = self.check_availability(isoy)
if index_x == -1 or index_y == -1:
print('Following input data are not available in the database. Revise your input.')
if index_x == -1:
print('x axis data not available')
if index_y == -1:
print('y axis data not available')
return None
# create x and y data as 1d vectors, also error bars
xdata_vec = np.zeros((len(self.data)))
ydata_vec = np.zeros((len(self.data)))
xdata_err = np.zeros((len(self.data)))
ydata_err = np.zeros((len(self.data)))
for it in range(len(self.data)):
xdata_vec[it] = self.data[it][index_x]
ydata_vec[it] = self.data[it][index_y]
xdata_err[it] = self.data[it][index_x+1]
ydata_err[it] = self.data[it][index_y+1]
# index data that are nan
index_nan = []
for it in range(len(xdata_vec)):
if np.isnan(xdata_vec[it]) or np.isnan(ydata_vec[it]):
index_nan.append(it)
# make range of all incides
index_filtered = list(range(len(xdata_vec)))
for it in range(len(index_nan)):
index_filtered.remove(index_nan[it])
xdata_tmp = np.zeros((len(index_filtered)))
ydata_tmp = np.zeros((len(index_filtered)))
xerr_tmp = np.zeros((len(index_filtered)))
yerr_tmp = np.zeros((len(index_filtered)))
style_plt = np.zeros((len(index_filtered),len(self.header_style)),dtype='|S1024')
for i in range(len(index_filtered)):
xdata_tmp[i] = xdata_vec[index_filtered[i]]
ydata_tmp[i] = ydata_vec[index_filtered[i]]
xerr_tmp[i] = xdata_err[index_filtered[i]]
yerr_tmp[i] = ydata_err[index_filtered[i]]
for j in range(len(style_plt[i])):
style_plt[i][j] = self.style[index_filtered[i]][j]
xdata_vec = xdata_tmp
ydata_vec = ydata_tmp
xdata_err = xerr_tmp
ydata_err = yerr_tmp
# loop through error and set nans to 0
for i in range(len(xdata_err)):
if np.isnan(xdata_err[i]):
xdata_err[i] = 0.
if np.isnan(ydata_err[i]):
ydata_err[i] = 0.
# make start stop index for groups
start_stop = []
start = 0
for it in range(len(xdata_vec)-1):
if (style_plt[it] == style_plt[it+1]).all():
continue
else:
stop = it + 1
start_stop.append([start,stop])
start = stop
# last entry
if start_stop == []:
start_stop.append([0,len(xdata_vec)])
else:
start_stop.append([start_stop[len(start_stop)-1][1],len(xdata_vec)+1])
# now append things to return variables
grain_plt_xdata = []
grain_plt_ydata = []
grain_plt_xerr = []
grain_plt_yerr = []
grain_plt_style = []
for i in range(len(start_stop)):
grain_plt_xdata.append(xdata_vec[start_stop[i][0]:start_stop[i][1]])
grain_plt_ydata.append(ydata_vec[start_stop[i][0]:start_stop[i][1]])
grain_plt_xerr.append(xdata_err[start_stop[i][0]:start_stop[i][1]])
grain_plt_yerr.append(ydata_err[start_stop[i][0]:start_stop[i][1]])
grain_plt_style.append(style_plt[start_stop[i][0]])
return [grain_plt_xdata,grain_plt_xerr,grain_plt_ydata,grain_plt_yerr,grain_plt_style]
def plot_pattern_return(self, isos, delta=True):
'''
        This routine returns isotopic data for a multi-ratio pattern
        plot from the filtered list of data.
Parameters
----------
        isos : list
            List of isotope ratios in standard format, e.g.,
            [['Si-30','Si-28'],['Si-29','Si-30'],...]
        delta : boolean, optional
            If True the values are treated as delta values, otherwise
            as ratios. The default is True.
Returns
-------
        grpl_data
            grain plot data.
        grpl_err
            error bars for the data.
grpl_style
style data for the different symbols.
'''
# check availability
index = []
delta_b = []
ratio_b = []
for i in range(len(isos)):
tmpi,tmpd,tmpr = self.check_availability(isos[i])
index.append(tmpi)
delta_b.append(tmpd)
            ratio_b.append(tmpr)
for i in range(len(index)):
if index[i] == -1:
                print('Input not available for: ' + str(isos[i]) + '. Revise!')
return None
# create x and y data as 1d vectors, also error bars
data_vec = np.zeros((len(self.data),len(isos)))
data_err = np.zeros((len(self.data),len(isos)))
for it in range(len(self.data)):
for jt in range(len(isos)):
data_vec[it][jt] = self.data[it][index[jt]]
data_err[it][jt] = self.data[it][index[jt]+1]
# index data that are nan
index_nan = []
for it in range(len(data_vec)):
for jt in range(len(data_vec[it])):
if np.isnan(data_vec[it][jt]):
index_nan.append(it)
# make range of all incides
index_filtered = list(range(len(data_vec)))
for it in range(len(index_nan)):
index_filtered.remove(index_nan[it])
data_tmp = np.zeros((len(index_filtered),len(isos)))
err_tmp = np.zeros((len(index_filtered),len(isos)))
style_plt = np.zeros((len(index_filtered),len(self.header_style)),dtype='|S1024')
for i in range(len(index_filtered)):
data_tmp[i] = data_vec[index_filtered[i]]
err_tmp[i] = data_err[index_filtered[i]]
for j in range(len(style_plt[i])):
                style_plt[i][j] = self.style[index_filtered[i]][j]
        data_vec = data_tmp
        data_err = err_tmp
        # loop through errors and set nans to 0
        for i in range(len(data_err)):
            for j in range(len(data_err[i])):
                if np.isnan(data_err[i][j]):
                    data_err[i][j] = 0.
        # make start stop indices for groups of identical plotting style
        start_stop = []
        start = 0
        for it in range(len(data_vec)-1):
            if (style_plt[it] == style_plt[it+1]).all():
                continue
            else:
                stop = it + 1
                start_stop.append([start,stop])
                start = stop
        # last entry
        if start_stop == []:
            start_stop.append([0,len(data_vec)])
        else:
            start_stop.append([start_stop[len(start_stop)-1][1],len(data_vec)])
        # now append things to return variables
        grain_plt_data = []
        grain_plt_err = []
        grain_plt_style = []
        for i in range(len(start_stop)):
            grain_plt_data.append(data_vec[start_stop[i][0]:start_stop[i][1]])
            grain_plt_err.append(data_err[start_stop[i][0]:start_stop[i][1]])
            grain_plt_style.append(style_plt[start_stop[i][0]])
        return [grain_plt_data,grain_plt_err,grain_plt_style]
##### SMALL HELPER ROUTINES #####
def check_availability(self, isos):
'''
This routine checks if the requested set of isotopes is
available in the dataset.
Parameters
----------
isos : list
set of isotopes in format ['Si-28','Si-30'].
Returns
-------
list
[index, delta_b, ratio_b].
            index: column index of the requested ratio in the data array.
            delta_b: is it a delta value or not?
            ratio_b: True if the ratio is inverted, False if not
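        Example (illustrative):
        >>> index, delta_b, ratio_b = g.check_availability(['Si-29', 'Si-28'])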
'''
# make names
iso1name = iso_name_converter(isos[0])
iso2name = iso_name_converter(isos[1])
ratio = iso1name + '/' + iso2name
ratio_inv = iso2name + '/' + iso1name
delta = 'd(' + iso1name + '/' + iso2name + ')'
delta_inv = 'd(' + iso2name + '/' + iso1name + ')'
index = -1
# search for data entry
try:
index = self.datadict[ratio]
delta_b = False
ratio_b = False
except KeyError:
try:
index = self.datadict[ratio_inv]
delta_b = False
ratio_b = True
except KeyError:
try:
index = self.datadict[delta]
delta_b = True
ratio_b = False
except KeyError:
try:
index = self.datadict[delta_inv]
delta_b = True
ratio_b = True
except KeyError:
index = -1
delta_b = None
ratio_b = None
return index, delta_b, ratio_b
def ratio_to_delta(self, isos_ss, ratio, oneover=False):
'''
Transforms an isotope ratio into a delta value
Parameters
----------
isos_ss: list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
ratio : float
ratio of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before transforming (never
inverse of delta value!). The default is False.
Returns
-------
float
delta value
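        Notes
        -----
        The conversion used is delta = (ratio / ss_ratio - 1.) * 1000.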
'''
# define if isos_ss is the ratio or the isotopes
if type(isos_ss) == float:
ss_ratio = isos_ss
elif type(isos_ss) == list:
ss_ratio = self.inut.isoratio_init(isos_ss)
else:
print('Check input of isos_ss into ratio_to_delta routine')
return None
# check if one over is necessary or not
if oneover:
ratio = old_div(1,ratio)
# calculate delta value
delta = (old_div(ratio, ss_ratio) - 1.) * 1000.
return delta
def delta_to_ratio(self, isos_ss, delta, oneover=False):
'''
Transforms a delta value into an isotopic ratio
Parameters
----------
isos_ss : list or float
list w/ isotopes, e.g., ['N-14','N-15'] OR the solar
system ratio.
delta : float
delta value of the isotopes to transform.
oneover : boolean
take the inverse of the ratio before returning it (never
of the delta value!). The default is False.
Returns
-------
float
            isotopic ratio
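        Notes
        -----
        The conversion used is ratio = (delta / 1000. + 1.) * ss_ratio.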
'''
# define if isos_ss is the ratio or the isotopes
if type(isos_ss) == float:
ss_ratio = isos_ss
elif type(isos_ss) == list:
ss_ratio = self.inut.isoratio_init(isos_ss)
else:
            print('Check input of isos_ss into delta_to_ratio routine')
return None
# transform to ratio
ratio = (old_div(delta, 1000.) + 1) * ss_ratio
# one over necessary or not?
if oneover:
ratio = old_div(1,ratio)
return ratio
##### SMALL SUBROUTINES THAT DO NOT NEED TO BE INSIDE THE CLASS, KIND OF GRAIN.PY SPECIFIC UTILS #####
def iso_name_converter(iso):
'''
Converts the name of the given isotope (input), e.g., 'N-14' to
14N as used later to compare w/ grain database.
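    Example: iso_name_converter('Si-28') returns '28si'.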
'''
sp = iso.split('-')
output = sp[1] + sp[0]
return output.lower()
def get_svnpath():
'''
This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run.
'''
svnpathtmp = __file__
splitsvnpath = svnpathtmp.split('/')
if len(splitsvnpath) == 1:
svnpath = os.path.abspath('.') + '/../../'
else:
svnpath = ''
for i in range(len(splitsvnpath)-3):
svnpath += splitsvnpath[i] + '/'
return svnpath
############################
##### BIG PREPROCESSOR #####
############################
# subroutine that reads in data and splits into nice numpy arrays
def preprocessor(fname,gdbdir,gdbload, wb_sic=None):
if gdbdir == None:
# path to svn
svnpathtmp = get_svnpath()
gdbdir = svnpathtmp + 'validation/grain_data/' # grain data directory
# Initialize private file if available
if fname != None:
wb_pri = xlrd.open_workbook(gdbdir + fname)
sh_pri = wb_pri.sheet_by_index(0)
print('Private file ' + fname + ' initialized.')
# Initialize grain database
if gdbload:
# SiC
wb_sic = xlrd.open_workbook(gdbdir + 'SiC-All.xls')
sh_sic = wb_sic.sheet_by_index(0)
# Graphites
wb_gra = xlrd.open_workbook(gdbdir + 'graphite-All.xls')
sh_gra = wb_gra.sheet_by_index(0)
# Oxides and Silicates
wb_oxi = xlrd.open_workbook(gdbdir + 'oxide-silicate-all.xls')
sh_oxi = wb_oxi.sheet_by_index(0)
# Misc grains
wb_mis = xlrd.open_workbook(gdbdir + 'miscellaneous-SiN.xls')
sh_mis = wb_mis.sheet_by_index(0)
# now bring all files together into one database (if private is not the only file specified)
header_data = list() # header for data
header_desc = list() # header for description
if gdbload:
# SiC - first file
head = sh_sic.row_values(0)
# now split up to header_data and header_desc
headswtch=True # switch from description to data
for head_i in head:
if headswtch: # write description header
writeswtch = True
for i in range(len(header_desc)):
if header_desc[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_desc.append(head_i)
if len(head_i) >= 5:
if head_i[0:5] == 'Notes':
headswtch = False
else: # write data header
writeswtch = True
for i in range(len(header_data)):
if header_data[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_data.append(head_i)
# Graphites
head = sh_gra.row_values(0)
headswtch=True # switch from description to data
for head_i in head:
if headswtch: # write description header
writeswtch = True
for i in range(len(header_desc)):
if header_desc[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_desc.append(head_i)
if len(head_i) >= 5:
if head_i[0:5] == 'Notes':
headswtch = False
else: # write data header
writeswtch = True
for i in range(len(header_data)):
if header_data[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_data.append(head_i)
# Silicates and Oxides
head = sh_oxi.row_values(0)
headswtch=True # switch from description to data
for head_i in head:
if headswtch: # write description header
writeswtch = True
for i in range(len(header_desc)):
if header_desc[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_desc.append(head_i)
if len(head_i) >= 5:
if head_i[0:5] == 'Notes':
headswtch = False
else: # write data header
writeswtch = True
for i in range(len(header_data)):
if header_data[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_data.append(head_i)
# Misc
head = sh_mis.row_values(0)
headswtch=True # switch from description to data
for head_i in head:
if headswtch: # write description header
writeswtch = True
for i in range(len(header_desc)):
if header_desc[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_desc.append(head_i)
if len(head_i) >= 5:
if head_i[0:5] == 'Notes':
headswtch = False
else: # write data header
writeswtch = True
for i in range(len(header_data)):
if header_data[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_data.append(head_i)
# Private file
if fname != None:
head = sh_pri.row_values(0)
headswtch=True # switch from description to data
for head_i in head:
if headswtch: # write description header
writeswtch = True
for i in range(len(header_desc)):
if header_desc[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_desc.append(head_i)
if len(head_i) >= 5:
if head_i[0:5] == 'Notes':
headswtch = False
else: # write data header
writeswtch = True
for i in range(len(header_data)):
if header_data[i] == head_i:
writeswtch = False
if writeswtch:
if head_i.replace(' ','') != '':
header_data.append(head_i)
# Raise error if nothing is specified
if gdbload == False and fname == None:
print('Nothing to load is specified!')
return [],[],[],[]
if gdbload:
# Prepare the data -> description and data, fill it into appropriate forms
# total amount of data entries
sic_len = len(sh_sic.col_values(0))
gra_len = len(sh_gra.col_values(0))
oxi_len = len(sh_oxi.col_values(0))
mis_len = len(sh_mis.col_values(0))
totdata = sic_len + gra_len + oxi_len + mis_len - 4
if fname != None:
pri_len = len(sh_pri.col_values(0))
totdata += pri_len - 1
totdata = int(totdata)
else:
pri_len = len(sh_pri.col_values(0))
totdata = pri_len - 1
# initialize the description list
descr = np.zeros((totdata,len(header_desc)+1),dtype='|S1024') # string with 1024 characters
# initialize data array
data = np.zeros((totdata,len(header_data)))
for i in range(len(data)):
for j in range(len(data[i])):
data[i][j] = nan
jadder = 0
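    # jadder keeps track of the running row offset into descr/data as each worksheet's rows are appended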
if gdbload:
# SiC
sic_hdict = dict(list(zip(sh_sic.row_values(0),np.arange(len(sh_sic.row_values(0))))))
# description data
for i in range(len(header_desc)):
try:
dat_tmp = sh_sic.col_values(sic_hdict[header_desc[i]])
except KeyError:
dat_tmp = False
if dat_tmp != False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) == float:
dat_tmp[j] = str(dat_tmp[j])
if dat_tmp[j].replace(' ','') != '':
try:
descr[j+jadder-1][i] = dat_tmp[j]
except UnicodeEncodeError:
                            descr[j+jadder-1][i] = ''
descr[j+jadder-1][len(header_desc)] = 'SiC'
for i in range(len(header_data)):
try:
dat_tmp = sh_sic.col_values(sic_hdict[header_data[i]])
except KeyError:
dat_tmp = False
if dat_tmp!= False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) != float:
dat_tmp_append = nan
else:
dat_tmp_append = float(dat_tmp[j])
data[j-1][i] = dat_tmp_append
jadder += sic_len - 1
# Graphites
gra_hdict = dict(list(zip(sh_gra.row_values(0),np.arange(len(sh_gra.row_values(0))))))
# description data
for i in range(len(header_desc)):
try:
dat_tmp = sh_gra.col_values(gra_hdict[header_desc[i]])
except KeyError:
dat_tmp = False
if dat_tmp != False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) == float:
dat_tmp[j] = str(dat_tmp[j])
if dat_tmp[j].replace(' ','') != '':
try:
descr[j+jadder-1][i] = dat_tmp[j]
except UnicodeEncodeError:
                            descr[j+jadder-1][i] = ''
descr[j+jadder-1][len(header_desc)] = 'Graphites'
# actual data
for i in range(len(header_data)):
try:
dat_tmp = sh_gra.col_values(gra_hdict[header_data[i]])
except KeyError:
dat_tmp = False
if dat_tmp!= False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) != float:
dat_tmp_append = nan
else:
dat_tmp_append = float(dat_tmp[j])
data[j+jadder-1][i] = dat_tmp_append
jadder += gra_len - 1
# Oxides
oxi_hdict = dict(list(zip(sh_oxi.row_values(0),np.arange(len(sh_oxi.row_values(0))))))
# description data
for i in range(len(header_desc)):
try:
dat_tmp = sh_oxi.col_values(oxi_hdict[header_desc[i]])
except KeyError:
dat_tmp = False
if dat_tmp != False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) == float:
dat_tmp[j] = str(dat_tmp[j])
if dat_tmp[j].replace(' ','') != '':
try:
descr[j+jadder-1][i] = dat_tmp[j]
except UnicodeEncodeError:
                            descr[j+jadder-1][i] = ''
descr[j+jadder-1][len(header_desc)] = 'Oxides, Silicates'
# actual data
for i in range(len(header_data)):
try:
dat_tmp = sh_oxi.col_values(oxi_hdict[header_data[i]])
except KeyError:
dat_tmp = False
if dat_tmp!= False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) != float:
dat_tmp_append = nan
else:
dat_tmp_append = float(dat_tmp[j])
data[j+jadder-1][i] = dat_tmp_append
jadder += oxi_len - 1
# Misc
mis_hdict = dict(list(zip(sh_mis.row_values(0),np.arange(len(sh_mis.row_values(0))))))
# description data
for i in range(len(header_desc)):
try:
dat_tmp = sh_mis.col_values(mis_hdict[header_desc[i]])
except KeyError:
dat_tmp = False
if dat_tmp != False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) == float:
dat_tmp[j] = str(dat_tmp[j])
if dat_tmp[j].replace(' ','') != '':
try:
descr[j+jadder-1][i] = dat_tmp[j]
except UnicodeEncodeError:
                            descr[j+jadder-1][i] = ''
descr[j+jadder-1][len(header_desc)] = 'Misc'
# actual data
for i in range(len(header_data)):
try:
dat_tmp = sh_mis.col_values(mis_hdict[header_data[i]])
except KeyError:
dat_tmp = False
if dat_tmp!= False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) != float:
dat_tmp_append = nan
else:
dat_tmp_append = float(dat_tmp[j])
data[j+jadder-1][i] = dat_tmp_append
jadder += mis_len - 1
# Private file
if fname != None:
pri_hdict = dict(list(zip(sh_pri.row_values(0),np.arange(len(sh_pri.row_values(0))))))
# description data
for i in range(len(header_desc)):
try:
dat_tmp = sh_pri.col_values(pri_hdict[header_desc[i]])
except KeyError:
dat_tmp = False
if dat_tmp != False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) == float:
dat_tmp[j] = str(dat_tmp[j])
if dat_tmp[j].replace(' ','') != '':
try:
descr[j+jadder-1][i] = dat_tmp[j]
except UnicodeEncodeError:
                            descr[j+jadder-1][i] = ''
descr[j+jadder-1][len(header_desc)] = 'Private'
for i in range(len(header_data)):
try:
dat_tmp = sh_pri.col_values(pri_hdict[header_data[i]])
except KeyError:
dat_tmp = False
if dat_tmp!= False:
for j in range(1,len(dat_tmp)):
if type(dat_tmp[j]) != float:
dat_tmp_append = nan
else:
dat_tmp_append = float(dat_tmp[j])
data[j+jadder-1][i] = dat_tmp_append
# make the data header lower case
for i in range(len(header_data)):
header_data[i] = header_data[i].lower()
header_desc.append('Database')
return header_desc, header_data, descr, data
### style creator ###
def style_creator(desc,descdict):
# make style definitions for plotting and everything
header_style = ['Symbol', 'Edge color', 'Face color', 'Symbol size', 'Edge width','Label']
style = np.zeros((len(desc),len(header_style)),dtype='|S1024')
# fill the styles in
for i in range(len(style)):
style[i][4] = '1' # edge width
# SiC grains
if desc[i][descdict['Database']] == 'SiC':
style[i][0] = 'o' # symbol
style[i][3] = '8' # symbol size
if desc[i][descdict['Type']] == 'M':
style[i][1] = '0.4'
style[i][5] = 'SiC M'
elif desc[i][descdict['Type']] == 'X':
style[i][1] = 'b'
style[i][5] = 'SiC X'
elif desc[i][descdict['Type']] == 'Y':
style[i][1] = 'g'
style[i][5] = 'SiC Y'
elif desc[i][descdict['Type']] == 'Z':
style[i][1] = 'r'
style[i][5] = 'SiC Z'
elif desc[i][descdict['Type']] == 'AB':
style[i][1] = 'c'
style[i][5] = 'SiC AB'
elif desc[i][descdict['Type']] == 'C' or desc[i][descdict['Type']] == 'U/C':
style[i][1] = 'y'
style[i][5] = 'SiC C'
elif desc[i][descdict['Type']] == 'N':
style[i][1] = 'm'
style[i][5] = 'SiC nova'
else:
style[i][1] = '0.7'
style[i][5] = 'SiC unclassified'
style[i][2] = style[i][1]
elif desc[i][descdict['Database']] == 'Graphites':
style[i][0] = 's' # symbol
style[i][3] = '8' # symbol size
if desc[i][descdict['Type']] == 'HD':
style[i][1] = '0.4'
style[i][5] = 'Graphite HD'
elif desc[i][descdict['Type']] == 'LD':
style[i][1] = 'b'
style[i][5] = 'Graphite LD'
else:
style[i][1] = '0.7'
style[i][5] = 'Graphite'
style[i][2] = style[i][1]
elif desc[i][descdict['Database']] == 'Oxides, Silicates':
style[i][0] = '^' # symbol
style[i][3] = '8' # symbol size
if desc[i][descdict['Group']] == '1':
style[i][1] = '0.4'
style[i][5] = 'Oxide / Silicate Group 1'
elif desc[i][descdict['Group']] == '2':
style[i][1] = 'b'
style[i][5] = 'Oxide / Silicate Group 2'
elif desc[i][descdict['Group']] == '3':
style[i][1] = 'g'
style[i][5] = 'Oxide / Silicate Group 3'
elif desc[i][descdict['Group']] == '4':
style[i][1] = 'r'
style[i][5] = 'Oxide / Silicate Group 4'
else:
style[i][1] = '0.7'
style[i][5] = 'Oxide / Silicate'
style[i][2] = style[i][1]
elif desc[i][descdict['Database']] == 'Misc':
style[i][0] = 'v'
style[i][3] = '8'
style[i][1] = '0.4'
style[i][2] = style[i][1]
style[i][5] = 'Misc'
elif desc[i][descdict['Database']] == 'Private':
style[i][0] = '>'
style[i][1] = '0.4'
style[i][3] = '8'
style[i][2] = style[i][1]
style[i][5] = 'Private'
else:
style[i][0] = '+'
style[i][1] = '0.4'
style[i][3] = '8'
style[i][2] = style[i][1]
style[i][5] = 'Unknown'
return header_style, style
| bsd-3-clause |
466152112/hyperopt | hyperopt/tests/test_vectorize.py | 7 | 7707 | import numpy as np
from hyperopt.pyll import as_apply, scope, rec_eval, clone, dfs
from hyperopt.pyll.stochastic import recursive_set_rng_kwarg
from hyperopt import base, fmin, rand
from hyperopt.vectorize import VectorizeHelper
from hyperopt.vectorize import replace_repeat_stochastic
from hyperopt.pyll_utils import hp_choice
from hyperopt.pyll_utils import hp_uniform
from hyperopt.pyll_utils import hp_quniform
from hyperopt.pyll_utils import hp_loguniform
from hyperopt.pyll_utils import hp_qloguniform
def config0():
p0 = scope.uniform(0, 1)
p1 = scope.uniform(2, 3)
p2 = scope.one_of(-1, p0)
p3 = scope.one_of(-2, p1)
p4 = 1
p5 = [3, 4, p0]
p6 = scope.one_of(-3, p1)
d = locals()
d['p1'] = None # -- don't sample p1 all the time, only if p3 says so
s = as_apply(d)
return s
def test_clone():
config = config0()
config2 = clone(config)
nodeset = set(dfs(config))
assert not any(n in nodeset for n in dfs(config2))
foo = recursive_set_rng_kwarg(
config,
scope.rng_from_seed(5))
r = rec_eval(foo)
print r
r2 = rec_eval(
recursive_set_rng_kwarg(
config2,
scope.rng_from_seed(5)))
print r2
assert r == r2
def test_vectorize_trivial():
N = as_apply(15)
p0 = hp_uniform('p0', 0, 1)
loss = p0
print loss
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
full_output = as_apply([vloss,
vh.idxs_by_label(),
vh.vals_by_label()])
fo2 = replace_repeat_stochastic(full_output)
new_vc = recursive_set_rng_kwarg(
fo2,
as_apply(np.random.RandomState(1)),
)
#print new_vc
losses, idxs, vals = rec_eval(new_vc)
print 'losses', losses
print 'idxs p0', idxs['p0']
print 'vals p0', vals['p0']
p0dct = dict(zip(idxs['p0'], vals['p0']))
for ii, li in enumerate(losses):
assert p0dct[ii] == li
def test_vectorize_simple():
N = as_apply(15)
p0 = hp_uniform('p0', 0, 1)
loss = p0 ** 2
print loss
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
full_output = as_apply([vloss,
vh.idxs_by_label(),
vh.vals_by_label()])
fo2 = replace_repeat_stochastic(full_output)
new_vc = recursive_set_rng_kwarg(
fo2,
as_apply(np.random.RandomState(1)),
)
#print new_vc
losses, idxs, vals = rec_eval(new_vc)
print 'losses', losses
print 'idxs p0', idxs['p0']
print 'vals p0', vals['p0']
p0dct = dict(zip(idxs['p0'], vals['p0']))
for ii, li in enumerate(losses):
assert p0dct[ii] ** 2 == li
def test_vectorize_multipath():
N = as_apply(15)
p0 = hp_uniform('p0', 0, 1)
loss = hp_choice('p1', [1, p0, -p0]) ** 2
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
print vloss
full_output = as_apply([vloss,
vh.idxs_by_label(),
vh.vals_by_label()])
new_vc = recursive_set_rng_kwarg(
full_output,
as_apply(np.random.RandomState(1)),
)
losses, idxs, vals = rec_eval(new_vc)
print 'losses', losses
print 'idxs p0', idxs['p0']
print 'vals p0', vals['p0']
print 'idxs p1', idxs['p1']
print 'vals p1', vals['p1']
p0dct = dict(zip(idxs['p0'], vals['p0']))
p1dct = dict(zip(idxs['p1'], vals['p1']))
for ii, li in enumerate(losses):
print ii, li
if p1dct[ii] != 0:
assert li == p0dct[ii] ** 2
else:
assert li == 1
def test_vectorize_config0():
p0 = hp_uniform('p0', 0, 1)
p1 = hp_loguniform('p1', 2, 3)
p2 = hp_choice('p2', [-1, p0])
p3 = hp_choice('p3', [-2, p1])
p4 = 1
p5 = [3, 4, p0]
p6 = hp_choice('p6', [-3, p1])
d = locals()
d['p1'] = None # -- don't sample p1 all the time, only if p3 says so
config = as_apply(d)
N = as_apply('N:TBA')
expr = config
expr_idxs = scope.range(N)
vh = VectorizeHelper(expr, expr_idxs, build=True)
vconfig = vh.v_expr
full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])
if 1:
print '=' * 80
print 'VECTORIZED'
print full_output
print '\n' * 1
fo2 = replace_repeat_stochastic(full_output)
if 0:
print '=' * 80
print 'VECTORIZED STOCHASTIC'
print fo2
print '\n' * 1
new_vc = recursive_set_rng_kwarg(
fo2,
as_apply(np.random.RandomState(1))
)
if 0:
print '=' * 80
print 'VECTORIZED STOCHASTIC WITH RNGS'
print new_vc
Nval = 10
foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})
print 'foo[0]', foo[0]
print 'foo[1]', foo[1]
assert len(foo) == Nval
if 0: # XXX refresh these values to lock down sampler
assert foo[0] == {
'p0': 0.39676747423066994,
'p1': None,
'p2': 0.39676747423066994,
'p3': 2.1281244479293568,
'p4': 1,
'p5': (3, 4, 0.39676747423066994) }
assert foo[1] != foo[2]
print idxs
print vals['p3']
print vals['p6']
print idxs['p1']
print vals['p1']
assert len(vals['p3']) == Nval
assert len(vals['p6']) == Nval
assert len(idxs['p1']) < Nval
p1d = dict(zip(idxs['p1'], vals['p1']))
for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
if p3v == p6v == 0:
assert ii not in idxs['p1']
if p3v:
assert foo[ii]['p3'] == p1d[ii]
if p6v:
print 'p6', foo[ii]['p6'], p1d[ii]
assert foo[ii]['p6'] == p1d[ii]
def test_distributions():
# test that the distributions come out right
# XXX: test more distributions
space = {
'loss': (
hp_loguniform('lu', -2, 2) +
hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
hp_quniform('qu', -4.999, 5, 1) +
hp_uniform('u', 0, 10)),
'status': 'ok'}
trials = base.Trials()
N = 1000
fmin(lambda x: x,
space=space,
algo=rand.suggest,
trials=trials,
max_evals=N,
rstate=np.random.RandomState(124),
catch_eval_exceptions=False)
assert len(trials) == N
idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
print idxs.keys()
COUNTMAX = 130
COUNTMIN = 70
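    # -- with N=1000 samples and ~10 histogram bins, roughly 100 counts per bin are expected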
# -- loguniform
log_lu = np.log(vals['lu'])
assert len(log_lu) == N
assert -2 < np.min(log_lu)
assert np.max(log_lu) < 2
h = np.histogram(log_lu)[0]
print h
assert np.all(COUNTMIN < h)
assert np.all(h < COUNTMAX)
# -- quantized log uniform
qlu = vals['qlu']
assert np.all(np.fmod(qlu, 2) == 0)
assert np.min(qlu) == 2
assert np.max(qlu) == 20
bc_qlu = np.bincount(qlu)
assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]
# -- quantized uniform
qu = vals['qu']
assert np.min(qu) == -5
assert np.max(qu) == 5
assert np.all(np.fmod(qu, 1) == 0)
bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
assert np.all(40 < bc_qu), bc_qu # XXX: how to get the distribution flat
# with new rounding rule?
assert np.all(bc_qu < 125), bc_qu
assert np.all(bc_qu < COUNTMAX)
# -- uniform
u = vals['u']
assert np.min(u) > 0
assert np.max(u) < 10
h = np.histogram(u)[0]
print h
assert np.all(COUNTMIN < h)
assert np.all(h < COUNTMAX)
#import matplotlib.pyplot as plt
#plt.hist(np.log(vals['node_2']))
#plt.show()
| bsd-3-clause |
hitszxp/scikit-learn | sklearn/metrics/__init__.py | 2 | 3262 | """
The :mod:`sklearn.metrics` module includes score functions, performance
metrics, pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
# Deprecated in 0.16
from .ranking import auc_score
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
]
| bsd-3-clause |
pkruskal/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
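    """Time func on random data with one core vs. all cores and plot both timing curves."""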
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
3manuek/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
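# -1 marks unlabeled samples; only the first (outer circle) and last (inner circle) points carry labels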
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
duane-edgington/stoqs | stoqs/contrib/analysis/drift_data.py | 3 | 21334 | #!/usr/bin/env python
__author__ = "Mike McCann"
__copyright__ = "Copyright 2014, MBARI"
__license__ = "GPL"
__maintainer__ = "Mike McCann"
__email__ = "mccann at mbari.org"
__status__ = "Development"
__doc__ = '''
Script to produce products (plots, kml, etc.) to help understand drifting data.
- Make progressive vector diagram from moored ADCP data (read from STOQS)
- Plot drogued drifter, ship, and other data (read from Tracking DB)
- Plot sensor data (read from STOQS)
Output as a .png map, .kml file, or ...
Mike McCann
MBARI 22 September 2014
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@author: __author__
@status: __status__
@license: __license__
'''
import os
import sys
project_dir = os.path.join(os.path.dirname(__file__), "../../") # settings.py is two dirs up
sys.path.insert(0, project_dir)
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.local'
import django
django.setup()
import csv
import time
import pyproj
import requests
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytz
from datetime import datetime
from collections import defaultdict
from contextlib import closing
from django.conf import settings
from stoqs.models import MeasuredParameter, NominalLocation, ActivityParameter
from django.http import HttpRequest
from utils.Viz.plotting import readCLT
from utils.Viz.KML import KML
from mpl_toolkits.basemap import Basemap
class Drift():
'''Data and methods to support drift data product preparation
'''
trackDrift = defaultdict(lambda: {'es': [], 'lon': [], 'lat': []}) # To be keyed by platform name
adcpDrift = defaultdict(lambda: {'es': [], 'lon': [], 'lat': []}) # To be keyed by depth
# Not to be confused with Stokes Drift - To be keyed by parameter,platform,min,max
stoqsDrift = defaultdict(lambda: {'es': [], 'lon': [], 'lat': [], 'depth': [], 'datavalue':[]})
def loadTrackingData(self):
'''Fill up trackDrift dictionary
'''
for url in self.args.trackData:
# Careful - trackingdb returns the records in reverse time order
if self.args.verbose:
print('Opening', url)
with closing(requests.get(url, stream=True)) as resp:
if resp.status_code != 200:
logger.error('Cannot read %s, resp.status_code = %s', url, resp.status_code)
return
r_decoded = (line.decode('utf-8') for line in resp.iter_lines())
for r in csv.DictReader(r_decoded):
# Use logic to skip inserting values if one or the other or both start and end are specified
if self.startDatetime:
if datetime.utcfromtimestamp(float(r['epochSeconds'])) < self.startDatetime:
continue
if self.endDatetime:
if datetime.utcfromtimestamp(float(r['epochSeconds'])) > self.endDatetime:
continue
self.trackDrift[r['platformName']]['es'].insert(0, float(r['epochSeconds']))
self.trackDrift[r['platformName']]['lat'].insert(0, float(r['latitude']))
self.trackDrift[r['platformName']]['lon'].insert(0, float(r['longitude']))
def computeADCPDrift(self):
'''Read data from database and put computed progressive vectors into adcpDrift dictionary
'''
if self.args.adcpPlatform:
adcpQS = MeasuredParameter.objects.using(self.args.database).filter(
measurement__instantpoint__activity__platform__name=self.args.adcpPlatform)
if self.startDatetime:
adcpQS = adcpQS.filter(measurement__instantpoint__timevalue__gte=self.startDatetime)
if self.endDatetime:
adcpQS = adcpQS.filter(measurement__instantpoint__timevalue__lte=self.endDatetime)
if self.args.adcpMinDepth:
adcpQS = adcpQS.filter(measurement__depth__gte=self.args.adcpMinDepth)
if self.args.adcpMaxDepth:
adcpQS = adcpQS.filter(measurement__depth__lte=self.args.adcpMaxDepth)
utd = adcpQS.filter(parameter__standard_name='eastward_sea_water_velocity').values_list(
'datavalue', 'measurement__instantpoint__timevalue', 'measurement__depth').order_by(
'measurement__depth', 'measurement__instantpoint__timevalue')
vtd = adcpQS.filter(parameter__standard_name='northward_sea_water_velocity').values_list(
'datavalue', 'measurement__instantpoint__timevalue', 'measurement__depth').order_by(
'measurement__depth', 'measurement__instantpoint__timevalue')
# Compute positions (progressive vectors) - horizontal displacement in meters
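        # (ADCP velocities are assumed to be stored in cm/s, so u * dt / 100 below yields metres)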
x = defaultdict(lambda: [])
y = defaultdict(lambda: [])
last_udiff = None
for i, ((u, ut, ud), (v, vt, vd)) in enumerate(zip(utd, vtd)):
try:
udiff = utd[i+1][1] - ut
vdiff = vtd[i+1][1] - vt
except IndexError as e:
# Extrapolate using last time difference, assuming it's regular and that we are at the last point, works only for very last point
udiff = last_udiff
vdiff = last_udiff
else:
last_udiff = udiff
if udiff != vdiff:
raise Exception('udiff != vdiff')
else:
dt = udiff.seconds + udiff.days * 24 * 3600
if dt < 0:
# For intermediate depths where (utd[i+1][1] - ut) is a diff with the time of the next depth
dt = last_dt
if ud != vd:
raise Exception('ud != vd')
else:
x[ud].append(u * dt / 100)
y[vd].append(v * dt / 100)
self.adcpDrift[ud]['es'].append(time.mktime(ut.timetuple()))
last_dt = dt
# Work in UTM space to add x & y offsets to begining position of the mooring
g0 = NominalLocation.objects.using(self.args.database).filter(activity__platform__name=self.args.adcpPlatform).values_list('geom')[0][0]
p = pyproj.Proj(proj='utm', zone=10, ellps='WGS84')
e0, n0 = p(g0.x, g0.y)
for depth in x:
eList = np.cumsum([e0] + x[depth])
nList = np.cumsum([n0] + y[depth])
lonList, latList = p(eList, nList, inverse=True)
self.adcpDrift[depth]['lon'] = lonList
self.adcpDrift[depth]['lat'] = latList
def loadSTOQSData(self):
'''Fill up stoqsDrift dictionary with platform_parameter as key
'''
for url in self.args.stoqsData:
            # Read the STOQS-exported CSV records, filtering by the requested time window
if self.args.verbose:
print('Opening', url)
with closing(requests.get(url.replace(' ', '%20'), stream=True)) as resp:
if resp.status_code != 200:
logger.error('Cannot read %s, resp.status_code = %s', url, resp.status_code)
return
r_decoded = (line.decode('utf-8') for line in resp.iter_lines())
for r in csv.DictReader(r_decoded):
# Use logic to skip inserting values if one or the other or both start and end are specified
dt = datetime.strptime(r['measurement__instantpoint__timevalue'], '%Y-%m-%d %H:%M:%S')
if self.startDatetime:
if dt < self.startDatetime:
continue
if self.endDatetime:
if dt > self.endDatetime:
continue
if self.args.verbose > 1:
print(r)
apQS = ActivityParameter.objects.using(self.args.database).filter(
activity__name=r['measurement__instantpoint__activity__name'],
parameter__name=r['parameter__name'])
# Mash together a key composed of parameter, platform, min, max for the Activity
key = "%s,%s,%f,%f" % ( r['parameter__name'], r['measurement__instantpoint__activity__platform__name'],
apQS[0].p025, apQS[0].p975 )
self.stoqsDrift[key]['es'].append(time.mktime(dt.timetuple()))
lon, lat = r['measurement__geom'].split('(')[-1].split(')')[0].split(' ')
self.stoqsDrift[key]['lat'].append(float(lat))
self.stoqsDrift[key]['lon'].append(float(lon))
self.stoqsDrift[key]['depth'].append(float(r['measurement__depth']))
self.stoqsDrift[key]['datavalue'].append(r['datavalue'])
def process(self):
'''Read in data and build structures that we can generate products from
'''
if self.args.trackData:
self.loadTrackingData()
if self.args.adcpPlatform:
self.computeADCPDrift()
if self.args.stoqsData:
self.loadSTOQSData()
def getExtent(self):
'''For all data members find the min and max latitude and longitude
'''
if self.args.extent:
return [float(e) for e in self.args.extent]
else:
lonMin = 180
lonMax = -180
latMin = 90
latMax = -90
for drift in (self.trackDrift, self.adcpDrift, self.stoqsDrift):
for k,v in list(drift.items()):
if np.min(v['lon']) < lonMin:
lonMin = np.min(v['lon'])
if np.max(v['lon']) > lonMax:
lonMax = np.max(v['lon'])
if np.min(v['lat']) < latMin:
latMin = np.min(v['lat'])
if np.max(v['lat']) > latMax:
latMax = np.max(v['lat'])
# Expand the computed extent by extendDeg degrees
extendDeg = self.args.extend
return lonMin - extendDeg, latMin - extendDeg, lonMax + extendDeg, latMax + extendDeg
def createPNG(self, fileName=None, forGeotiff=False):
'''Draw processed data on a map and save it as a .png file
'''
if not forGeotiff:
fig = plt.figure(figsize=(18, 12))
ax = plt.axes()
else:
fig = plt.figure()
ax = fig.add_axes((0,0,1,1))
if not fileName:
fileName = self.args.pngFileName
e = self.getExtent()
m = Basemap(llcrnrlon=e[0], llcrnrlat=e[1], urcrnrlon=e[2], urcrnrlat=e[3], projection='cyl', resolution='l', ax=ax)
if not forGeotiff:
m.arcgisimage(server='http://services.arcgisonline.com/ArcGIS', service='Ocean_Basemap')
for depth, drift in list(self.adcpDrift.items()):
m.plot(drift['lon'], drift['lat'], '-', c='black', linewidth=1)
plt.text(drift['lon'][-1], drift['lat'][-1], '%i m' % depth, size='small')
for platform, drift in list(self.trackDrift.items()):
# Ad hoc coloring of platforms...
if platform.startswith('stella'):
color = 'yellow'
elif platform.startswith('daphne'):
color = 'orange'
elif platform.startswith('makai'):
color = 'magenta'
else:
color = 'red'
m.plot(drift['lon'], drift['lat'], '-', c=color, linewidth=2)
plt.text(drift['lon'][-1], drift['lat'][-1], platform, size='small')
        # Plot each data point with its own color based on the activity statistics from STOQS
coloredDotSize = 30
clt = readCLT(os.path.join(settings.ROOT_DIR('static'), 'colormaps', 'jetplus.txt'))
cm_jetplus = matplotlib.colors.ListedColormap(np.array(clt))
for key, drift in list(self.stoqsDrift.items()):
            # p025/p975 percentiles and data values arrive as CSV strings; convert for color scaling
            pmin, pmax = [float(v) for v in key.split(',')[2:4]]
            ax.scatter(drift['lon'], drift['lat'], c=[float(v) for v in drift['datavalue']], s=coloredDotSize, cmap=cm_jetplus, lw=0, vmin=pmin, vmax=pmax)
label = '%s from %s' % tuple(key.split(',')[:2])
plt.text(drift['lon'][-1], drift['lat'][-1], label, size='small')
nowLocal = str(pytz.utc.localize(datetime.now()).astimezone(pytz.timezone('America/Los_Angeles'))).split('.')[0]
plt.text(0.99, 0.01, 'Created: ' + nowLocal + ' Local', horizontalalignment='right', verticalalignment='bottom', transform=ax.transAxes)
if not forGeotiff:
m.drawparallels(np.linspace(e[1],e[3],num=3), labels=[True,False,False,False], linewidth=0)
m.drawmeridians(np.linspace(e[0],e[2],num=3), labels=[False,False,False,True], linewidth=0)
try:
plt.title(self.title)
except AttributeError:
pass
fig.savefig(fileName)
print('Wrote file', self.args.pngFileName)
else:
plt.axis('off')
try:
plt.text(0.5, 0.95, self.title, horizontalalignment='center', verticalalignment='top', transform=ax.transAxes)
except AttributeError:
pass
fig.savefig(fileName, transparent=True, dpi=300, bbox_inches='tight', pad_inches=0)
plt.clf()
plt.close()
def createGeoTiff(self):
'''Your image must be only the geoplot with no decorations like axis titles, axis labels, etc., and you
will need accurate upper-left and lower-right coordinates in EPSG:4326 projection, also known as WGS 84 projection,...
The syntax is pretty straightforward, something like the following will convert your image to the correct format:
gdal_translate <image.png> <image.tiff> -a_ullr -122.25 37.1 -121.57365 36.67558
There is also a python wrapper for the GDAL library
https://pypi.python.org/pypi/GDAL/
'''
e = self.getExtent()
self.createPNG(self.args.geotiffFileName + '.png', forGeotiff=True)
cmd = 'gdal_translate %s %s -a_ullr %s %s %s %s' % (self.args.geotiffFileName + '.png',
self.args.geotiffFileName, e[0], e[3], e[2], e[1])
print("Executing:\n", cmd)
os.system(cmd)
os.remove(self.args.geotiffFileName + '.png')
print('Wrote file', self.args.geotiffFileName)
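    # Hedged alternative to shelling out above: the GDAL Python bindings mentioned
    # in the docstring could do the same translation in-process. This is an
    # untested sketch; it assumes the osgeo package is installed and that
    # outputBounds corresponds to gdal_translate's -a_ullr option.
    #   from osgeo import gdal
    #   gdal.Translate(self.args.geotiffFileName,
    #                  self.args.geotiffFileName + '.png',
    #                  outputBounds=[e[0], e[3], e[2], e[1]])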
def createKML(self):
'''Reuse STOQS utils/Viz code to build some simple KML. Use 'position' for Parameter Name.
Fudge data value to distinguish platforms by color, use 0.0 for depth except for adcp data.
'''
request = HttpRequest()
qs = None
qparams = {}
stoqs_object_name = None
kml = KML(request, qs, qparams, stoqs_object_name, withTimeStamps=True, withLineStrings=True, withFullIconURL=True)
# Put data into form that KML() expects - use different datavalues (-1, 1) to color the platforms
dataHash = defaultdict(lambda: [])
colors = {}
values = np.linspace(-1, 1, len(list(self.trackDrift.keys())))
for i, k in enumerate(self.trackDrift.keys()):
colors[k] = values[i]
for platform, drift in list(self.trackDrift.items()):
for es, lo, la in zip(drift['es'], drift['lon'], drift['lat']):
dataHash[platform].append([datetime.utcfromtimestamp(es), lo, la, 0.0, 'position', colors[platform], platform])
for depth, drift in list(self.adcpDrift.items()):
for es, lo, la in zip(drift['es'], drift['lon'], drift['lat']):
dataHash[depth].append([datetime.utcfromtimestamp(es), lo, la, float(depth), 'position', 0.0, 'adcp'])
for key, drift in list(self.stoqsDrift.items()):
parm, plat = key.split(',')[:2]
for es, lo, la, de, dv in zip(drift['es'], drift['lon'], drift['lat'], drift['depth'], drift['datavalue']):
dataHash[parm].append([datetime.utcfromtimestamp(es), lo, la, de, parm, dv, plat])
try:
title = self.title
except AttributeError:
title = 'Product of STOQS drift_data.py'
        description = self.commandline.replace('&', '&amp;')
kml = kml.makeKML(self.args.database, dataHash, 'position', title, description, 0.0, 0.0 )
fh = open(self.args.kmlFileName, 'w')
fh.write(kml)
fh.close()
print('Wrote file', self.args.kmlFileName)
def process_command_line(self):
'''The argparse library is included in Python 2.7 and is an added package for STOQS.
'''
import argparse
from argparse import RawTextHelpFormatter
examples = 'Examples:' + '\n\n'
examples += "M1 ADCP progressive vector diagram and Stella and Rachel Carson position data:\n"
examples += sys.argv[0] + " --database stoqs_september2014 --adcpPlatform M1_Mooring --adcpMinDepth 30 --adcpMaxDepth 40"
examples += " --trackData http://odss.mbari.org/trackingdb/position/stella101/between/20140922T171500/20141010T000000/data.csv"
examples += " http://odss.mbari.org/trackingdb/position/R_CARSON/between/20140922T171500/20141010T000000/data.csv"
examples += " http://odss.mbari.org/trackingdb/position/stella122/between/20140922T171500/20141010T000000/data.csv"
examples += " --pngFileName foo.png --start 20140923T180000 --end 20140925T150000"
examples += "\n"
examples += '\nIf running from cde-package replace ".py" with ".py.cde".'
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description='Script to produce products to help understand drift caused by currents in the ocean',
epilog=examples)
parser.add_argument('-d', '--database', action='store', help='Database alias', default='stoqs_september2014')
parser.add_argument('--adcpPlatform', action='store', help='STOQS Platform Name for ADCP data')
parser.add_argument('--adcpMinDepth', action='store', help='Minimum depth of ADCP data for progressive vector data', type=float)
parser.add_argument('--adcpMaxDepth', action='store', help='Maximum depth of ADCP data for progressive vector data', type=float)
parser.add_argument('--trackData', action='store', help='List of MBARItracking database .csv urls for data from drifters, ships, etc.', nargs='*', default=[])
parser.add_argument('--stoqsData', action='store', help='List of STOQS MeasuredParameter Data Access .csv urls for parameter data', nargs='*', default=[])
parser.add_argument('--start', action='store', help='Start time in YYYYMMDDTHHMMSS format')
parser.add_argument('--end', action='store', help='End time in YYYYMMDDTHHMMSS format')
parser.add_argument('--extend', action='store', help='Extend the data extent for the map boundaries by this value in degrees', default=0.05, type=float)
parser.add_argument('--extent', action='store', help='Space separated specific map boundary in degrees: ll_lon ll_lat ur_lon ur_lat', nargs=4, default=[])
parser.add_argument('--title', action='store', help='Title for plots, will override default title created if --start specified')
parser.add_argument('--kmlFileName', action='store', help='Name of file for KML output')
parser.add_argument('--pngFileName', action='store', help='Name of file for PNG image of map')
parser.add_argument('--geotiffFileName', action='store', help='Name of file for geotiff image of map')
parser.add_argument('-v', '--verbose', nargs='?', choices=[1,2,3], type=int, help='Turn on verbose output. Higher number = more output.', const=1)
self.args = parser.parse_args()
self.commandline = ' '.join(sys.argv)
self.startDatetime = None
        # Make both naive and timezone aware datetime data members
if self.args.start:
self.startDatetime = datetime.strptime(self.args.start, '%Y%m%dT%H%M%S')
self.startDatetimeUTC = pytz.utc.localize(self.startDatetime)
self.startDatetimeLocal = self.startDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))
self.title = 'Drift since %s' % self.startDatetimeLocal
self.endDatetime = None
if self.args.end:
self.endDatetime = datetime.strptime(self.args.end, '%Y%m%dT%H%M%S')
self.endDatetimeUTC = pytz.utc.localize(self.endDatetime)
self.endDatetimeLocal = self.endDatetimeUTC.astimezone(pytz.timezone('America/Los_Angeles'))
if self.args.title:
self.title = self.args.title
if __name__ == '__main__':
d = Drift()
d.process_command_line()
d.process()
if d.args.pngFileName:
d.createPNG()
if d.args.geotiffFileName:
d.createGeoTiff()
if d.args.kmlFileName:
d.createKML()
| gpl-3.0 |
datapythonista/pandas | pandas/tests/frame/methods/test_update.py | 3 | 4604 | import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestDataFrameUpdate:
def test_update_nan(self):
# #15593 #15617
# test 1
df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = df1.copy()
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
# test 2
df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
def test_update(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
df.update(other)
expected = DataFrame(
[[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
tm.assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"bad_kwarg, exception, msg",
[
# errors must be 'ignore' or 'raise'
({"errors": "something"}, ValueError, "The parameter errors must.*"),
({"join": "inner"}, NotImplementedError, "Only left join is supported"),
],
)
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
df = DataFrame([[1.5, 1, 3.0]])
with pytest.raises(exception, match=msg):
df.update(df, **bad_kwarg)
def test_update_raise_on_overlap(self):
df = DataFrame(
[[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
with pytest.raises(ValueError, match="Data overlaps"):
df.update(other, errors="raise")
def test_update_from_non_df(self):
d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
df = DataFrame(d)
d["a"] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
df = DataFrame(d)
d["a"] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
def test_update_datetime_tz(self):
# GH 25807
result = DataFrame([pd.Timestamp("2019", tz="UTC")])
result.update(result)
expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
tm.assert_frame_equal(result, expected)
def test_update_with_different_dtype(self):
# GH#3217
df = DataFrame({"a": [1, 3], "b": [np.nan, 2]})
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame({"a": [1, 3], "b": [np.nan, 2], "c": ["foo", np.nan]})
tm.assert_frame_equal(df, expected)
| bsd-3-clause |
Titan-C/scikit-learn | examples/neighbors/plot_regression.py | 15 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using k-Nearest Neighbors and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
# #############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
# #############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
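# Hedged extension (not part of the original example): instead of fixing
# n_neighbors at 5, it could be selected by cross-validation. GridSearchCV is
# assumed to live in sklearn.model_selection (sklearn.grid_search in older
# releases), so this is left as a commented sketch.
#   from sklearn.model_selection import GridSearchCV
#   search = GridSearchCV(neighbors.KNeighborsRegressor(),
#                         {'n_neighbors': [3, 5, 10, 20],
#                          'weights': ['uniform', 'distance']}, cv=5)
#   search.fit(X, y)
#   print(search.best_params_)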
| bsd-3-clause |
RomainBrault/OVFM | OVFM/DataGeneration.py | 1 | 2814 | import math
import numpy
import numpy.linalg
import numpy.random
import Model
def generate_inputs( nb_data, i_dim, shape = 'cube' ):
inputs = 2 * numpy.random.rand( nb_data, i_dim ) - 1
if shape == 'ball':
return inputs[ numpy.linalg.norm( inputs, ord = 2, axis = 1 ) <= 1, : ]
elif shape == 'sphere':
return inputs / numpy.linalg.norm( inputs, ord = 2, axis = 1 )[ :, numpy.newaxis ]
elif 'cube':
return inputs
def generate_outputs( X, model ):
model.coefs = numpy.random.normal( 0, 1, model.coefs.shape )
return model( X )
def mesh2array( X, Y ):
arr = numpy.empty( ( X.size, 2 ) )
arr[ :, 0 ] = X.ravel( )
arr[ :, 1 ] = Y.ravel( )
return arr
def array2mesh( arr, side = None ):
if side == None:
side = int( math.sqrt( arr.shape[ 0 ] ) )
X = arr[ :, 0 ].reshape( ( side, side ) )
Y = arr[ :, 1 ].reshape( ( side, side ) )
return X, Y
def generate_2Dvectorfield( side, model, shape = ( -1, 1, -1, 1 ), w = None ):
model.coefs = numpy.random.normal( 0, 1, model.coefs.shape )
Y, X = numpy.mgrid[ shape[ 0 ]:shape[ 1 ]:side*1j, shape[ 0 ]:shape[ 1 ]:side*1j ]
U, V = array2mesh( model( mesh2array( X, Y ) ), side )
return X, Y, U, V, w
def plot_error( filename = 'sgd_error.txt' ):
import matplotlib.pyplot as plt
error = numpy.genfromtxt( filename, skip_header = 1, delimiter = ' ' )
fig1, axarr = plt.subplots( 3, 2 )
axarr[ 0, 0 ].plot( error[ :, 0 ], error[ :, 1 ] )
# axarr[ 0, 0 ].set_xlabel( 'Iterations' )
axarr[ 0, 0 ].set_ylabel( 'MSE' )
axarr[ 0, 1 ].plot( error[ :, 1 ], error[ :, 4 ] )
# axarr[ 0, 1 ].set_xlabel( 'MSE' )
axarr[ 0, 1 ].set_ylabel( '$1/D\sum_{i=0}^D\Theta_i=0$' )
axarr[ 0, 1 ].set_xscale( 'log' )
axarr[ 0, 1 ].invert_xaxis()
axarr[ 1, 0 ].plot( error[ :, 0 ], error[ :, 2 ] )
# axarr[ 1, 0 ].set_xlabel( 'Iterations' )
axarr[ 1, 0 ].set_ylabel( '$||\Theta||_1$' )
axarr[ 1, 1 ].plot( error[ :, 1 ], error[ :, 2 ] )
# axarr[ 1, 1 ].set_xlabel( 'MSE' )
axarr[ 1, 1 ].set_ylabel( '$||\Theta||_1$' )
axarr[ 1, 1 ].set_xscale( 'log' )
axarr[ 1, 1 ].invert_xaxis( )
axarr[ 2, 0 ].plot( error[ :, 0 ], error[ :, 3 ] )
axarr[ 2, 0 ].set_xlabel( 'Iterations' )
axarr[ 2, 0 ].set_ylabel( '$||\Theta||_2$' )
axarr[ 2, 1 ].plot( error[ :, 1 ], error[ :, 3 ] )
axarr[ 2, 1 ].set_xlabel( 'MSE' )
axarr[ 2, 1 ].set_ylabel( '$||\Theta||_2$' )
axarr[ 2, 1 ].set_xscale( 'log' )
axarr[ 2, 1 ].invert_xaxis( )
plt.show()
def mean_score( scores ):
import scipy.stats as sps
"""Print the empirical mean score and standard deviation of the scores of a cross validation"""
return ( "Mean score: {0} (+/-{1})" ).format( numpy.mean( scores ), sps.sem( scores ) )
| mit |
ElDeveloper/scikit-learn | sklearn/discriminant_analysis.py | 4 | 27419 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.scale_ * ledoit_wolf(X)[0] * sc.scale_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
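# Hedged illustration of the shrinkage options documented above (X is assumed to
# be an (n_samples, n_features) array supplied by the caller):
#   s_emp = _cov(X)           # empirical covariance, no shrinkage
#   s_lw = _cov(X, 'auto')    # Ledoit-Wolf shrinkage
#   s_fix = _cov(X, 0.5)      # fixed shrinkage half-way to the shrunk target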
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
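        # eigh(Sb, Sw) solves the generalized eigenproblem Sb v = lambda Sw v,
        # i.e. it maximizes the Rayleigh quotient v' Sb v / v' Sw v.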
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
ozak/geopandas | benchmarks/plotting.py | 4 | 2124 | import random
from geopandas import GeoDataFrame, GeoSeries
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
import numpy as np
class Bench:
param_names = ['geom_type']
params = [('Point', 'LineString', 'Polygon', 'MultiPolygon', 'mixed')]
def setup(self, geom_type):
if geom_type == 'Point':
geoms = GeoSeries([Point(i, i) for i in range(1000)])
elif geom_type == 'LineString':
geoms = GeoSeries([LineString([(random.random(), random.random())
for _ in range(5)])
for _ in range(100)])
elif geom_type == 'Polygon':
geoms = GeoSeries([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(100)])
elif geom_type == 'MultiPolygon':
geoms = GeoSeries(
[MultiPolygon([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(3)])
for _ in range(20)])
elif geom_type == 'mixed':
g1 = GeoSeries([Point(i, i) for i in range(100)])
g2 = GeoSeries([LineString([(random.random(), random.random())
for _ in range(5)])
for _ in range(100)])
g3 = GeoSeries([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(100)])
geoms = g1
geoms.iloc[np.random.randint(0, 100, 50)] = g2
geoms.iloc[np.random.randint(0, 100, 33)] = g3
print(geoms.geom_type.value_counts())
df = GeoDataFrame({'geometry': geoms,
'values': np.random.randn(len(geoms))})
self.geoms = geoms
self.df = df
def time_plot_series(self, *args):
self.geoms.plot()
def time_plot_values(self, *args):
self.df.plot(column='values')
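# Hedged usage note (not part of the asv suite itself): asv drives Bench through
# its param machinery, but the benchmark can also be exercised by hand, e.g.
#   b = Bench()
#   b.setup('Polygon')
#   b.time_plot_series('Polygon')
#   b.time_plot_values('Polygon')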
| bsd-3-clause |
Obus/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/nolearn/model.py | 2 | 2706 | import warnings
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.joblib import delayed
from sklearn.externals.joblib import Parallel
warnings.warn("""
The nolearn.model module will be removed in nolearn 0.6. If you want
to continue to use this module, please consider copying the code into
your own project.
""")
class AbstractModel(object):
"""A small abstraction around :class:`~sklearn.pipeline.Pipeline`
objects.
Allows the convenient parametrization of the underlying pipeline
through :attr:`~AbstractModel.params`.
"""
default_params = dict()
def __init__(self, **kwargs):
"""
:param kwargs: Keyword arguments correspond to pipeline
parameters, and will override parameters in
:attr:`~AbstractModel.default_params`.
"""
params = self.default_params.copy()
params.update(kwargs)
self.params = params
def __call__(self):
"""
:rtype: :class:`~sklearn.pipeline.Pipeline`
"""
pipeline = self.pipeline
pipeline.set_params(**self.params)
return pipeline
@property
def pipeline(self): # pragma: no cover
raise NotImplementedError()
def _avgest_fit_est(est, i, X, y, verbose):
if verbose:
print "[AveragingEstimator] estimator_%s.fit() ..." % i
return est.fit(X, y)
def _avgest_predict_proba(est, i, X, verbose):
if verbose:
print "[AveragingEstimator] estimator_%s.predict_proba() ..." % i
return est.predict_proba(X)
class AveragingEstimator(BaseEstimator):
"""An estimator that wraps a list of other estimators and returns
their average for :meth:`fit`, :meth:`predict` and
:meth:`predict_proba`.
"""
def __init__(self, estimators, verbose=0, n_jobs=1):
"""
:param estimators: List of estimator objects.
"""
self.estimators = estimators
self.verbose = verbose
self.n_jobs = n_jobs
def fit(self, X, y):
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_avgest_fit_est)(est, i, X, y, self.verbose)
for i, est in enumerate(self.estimators))
self.estimators = result
return self
def predict(self, X):
return np.argmax(self.predict_proba(X), axis=1)
def predict_proba(self, X):
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_avgest_predict_proba)(est, i, X, self.verbose)
for i, est in enumerate(self.estimators))
for proba in result[1:]:
result[0] += proba
return result[0] / len(self.estimators)
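# Hedged usage sketch (the estimator list and the X_train/y_train/X_test names
# are illustrative, not part of this module):
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.ensemble import RandomForestClassifier
#   avg = AveragingEstimator([LogisticRegression(), RandomForestClassifier()])
#   avg.fit(X_train, y_train)
#   probas = avg.predict_proba(X_test)  # element-wise mean of both models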
| bsd-3-clause |
mila-iqia/babyai | scripts/il_dataeff.py | 1 | 1237 | #!/usr/bin/env python3
import argparse
import pandas
import os
import json
from babyai import plotting
parser = argparse.ArgumentParser("Analyze data efficiency of imitation learning")
parser.add_argument('--path', default='.')
parser.add_argument("--regex", default='.*')
parser.add_argument("--patience", default=2, type=int)
parser.add_argument("--window", default=1, type=int)
parser.add_argument("--limit", default="frames")
parser.add_argument("report")
args = parser.parse_args()
if os.path.exists(args.report):
raise ValueError("report directory already exists")
os.mkdir(args.report)
summary_path = os.path.join(args.report, 'summary.csv')
figure_path = os.path.join(args.report, 'visualization.png')
result_path = os.path.join(args.report, 'result.json')
df_logs = pandas.concat(plotting.load_logs(args.path), sort=True)
df_success_rate, normal_time = plotting.best_within_normal_time(
df_logs, args.regex,
patience=args.patience, window=args.window, limit=args.limit,
summary_path=summary_path)
result = plotting.estimate_sample_efficiency(
df_success_rate, visualize=True, figure_path=figure_path)
result['normal_time'] = normal_time
with open(result_path, 'w') as dst:
json.dump(result, dst)
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
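# Hedged follow-up (not part of the original example): a quick numeric check of
# the overfitting described in the docstring, comparing training error by depth.
#   for clf in (clf_1, clf_2, clf_3):
#       mse = np.mean((clf.predict(X) - y) ** 2)
#       print(clf.max_depth, mse)  # training error shrinks as max_depth grows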
| bsd-3-clause |
jdrudolph/scikit-bio | skbio/stats/ordination/_redundancy_analysis.py | 8 | 8953 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from skbio._base import OrdinationResults
from ._utils import corr, svd_rank, scale
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def rda(y, x, scale_Y=False, scaling=1):
r"""Compute redundancy analysis, a type of canonical analysis.
It is related to PCA and multiple regression because the explained
variables `y` are fitted to the explanatory variables `x` and PCA
is then performed on the fitted values. A similar process is
performed on the residuals.
RDA should be chosen if the studied gradient is small, and CCA
when it's large, so that the contingency table is sparse.
Parameters
----------
y : pd.DataFrame
:math:`n \times p` response matrix, where :math:`n` is the number
of samples and :math:`p` is the number of features. Its columns
need be dimensionally homogeneous (or you can set `scale_Y=True`).
This matrix is also referred to as the community matrix that
commonly stores information about species abundances
x : pd.DataFrame
:math:`n \times m, n \geq m` matrix of explanatory
variables, where :math:`n` is the number of samples and
:math:`m` is the number of metadata variables. Its columns
need not be standardized, but doing so turns regression
coefficients into standard regression coefficients.
scale_Y : bool, optional
Controls whether the response matrix columns are scaled to
have unit standard deviation. Defaults to `False`.
scaling : int
Scaling type 1 produces a distance biplot. It focuses on
the ordination of rows (samples) because their transformed
distances approximate their original euclidean
distances. Especially interesting when most explanatory
variables are binary.
Scaling type 2 produces a correlation biplot. It focuses
on the relationships among explained variables (`y`). It
is interpreted like scaling type 1, but taking into
account that distances between objects don't approximate
their euclidean distances.
See more details about distance and correlation biplots in
[1]_, \S 9.1.4.
Returns
-------
OrdinationResults
Object that stores the computed eigenvalues, the
proportion explained by each of them (per unit),
transformed coordinates for feature and samples, biplot
scores, sample constraints, etc.
Notes
-----
The algorithm is based on [1]_, \S 11.1, and is expected to
give the same results as ``rda(y, x)`` in R's package vegan.
See Also
--------
cca
References
----------
.. [1] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
Y = y.as_matrix()
X = x.as_matrix()
n, p = y.shape
n_, m = x.shape
if n != n_:
raise ValueError(
"Both data matrices must have the same number of rows.")
if n < m:
# Mmm actually vegan is able to do this case, too
raise ValueError(
"Explanatory variables cannot have less rows than columns.")
sample_ids = y.index
feature_ids = y.columns
# Centre response variables (they must be dimensionally
# homogeneous)
Y = scale(Y, with_std=scale_Y)
# Centre explanatory variables
X = scale(X, with_std=False)
# Distribution of variables should be examined and transformed
# if necessary (see paragraph 4 in p. 580 L&L 1998)
# Compute Y_hat (fitted values by multivariate linear
# regression, that is, linear least squares). Formula 11.6 in
# L&L 1998 involves solving the normal equations, but that fails
# when cond(X) ~ eps**(-0.5). A more expensive but much more
# stable solution (fails when cond(X) ~ eps**-1) is computed
# using the QR decomposition of X = QR:
# (11.6) Y_hat = X [X' X]^{-1} X' Y
# = QR [R'Q' QR]^{-1} R'Q' Y
# = QR [R' R]^{-1} R'Q' Y
# = QR R^{-1} R'^{-1} R' Q' Y
# = Q Q' Y
# and B (matrix of regression coefficients)
# (11.4) B = [X' X]^{-1} X' Y
# = R^{-1} R'^{-1} R' Q' Y
# = R^{-1} Q'
# Q, R = np.linalg.qr(X)
# Y_hat = Q.dot(Q.T).dot(Y)
# B = scipy.linalg.solve_triangular(R, Q.T.dot(Y))
# This works provided X has full rank. When not, you can still
# fix it using R's pseudoinverse or partitioning R. To avoid any
# issues, like the numerical instability when trying to
# reproduce an example in L&L where X was rank-deficient, we'll
# just use `np.linalg.lstsq`, which uses the SVD decomposition
# under the hood and so it's also more expensive.
B, _, rank_X, _ = lstsq(X, Y)
Y_hat = X.dot(B)
# Now let's perform PCA on the fitted values from the multiple
# regression
u, s, vt = svd(Y_hat, full_matrices=False)
# vt are the right eigenvectors, which is what we need to
# perform PCA. That is, we're changing points in Y_hat from the
# canonical basis to the orthonormal basis given by the right
# eigenvectors of Y_hat (or equivalently, the eigenvectors of
# the covariance matrix Y_hat.T.dot(Y_hat))
# See 3) in p. 583 in L&L 1998
rank = svd_rank(Y_hat.shape, s)
# Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues
U = vt[:rank].T # U as in Fig. 11.2
# Ordination in the space of response variables. Its columns are
# sample scores. (Eq. 11.12)
F = Y.dot(U)
# Ordination in the space of explanatory variables. Its columns
# are fitted sample scores. (Eq. 11.13)
Z = Y_hat.dot(U)
# Canonical coefficients (formula 11.14)
# C = B.dot(U) # Not used
Y_res = Y - Y_hat
# PCA on the residuals
u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
# See 9) in p. 587 in L&L 1998
rank_res = svd_rank(Y_res.shape, s_res)
    # Theoretically, there're at most min(p, n - 1) non-zero eigenvalues as well
U_res = vt_res[:rank_res].T
F_res = Y_res.dot(U_res) # Ordination in the space of residuals
eigenvalues = np.r_[s[:rank], s_res[:rank_res]]
# Compute scores
if scaling not in {1, 2}:
raise NotImplementedError("Only scalings 1, 2 available for RDA.")
# According to the vegan-FAQ.pdf, the scaling factor for scores
# is (notice that L&L 1998 says in p. 586 that such scaling
# doesn't affect the interpretation of a biplot):
pc_ids = ['RDA%d' % (i+1) for i in range(len(eigenvalues))]
eigvals = pd.Series(eigenvalues, index=pc_ids)
const = np.sum(eigenvalues**2)**0.25
if scaling == 1:
scaling_factor = const
elif scaling == 2:
scaling_factor = eigenvalues / const
feature_scores = np.hstack((U, U_res)) * scaling_factor
sample_scores = np.hstack((F, F_res)) / scaling_factor
feature_scores = pd.DataFrame(feature_scores,
index=feature_ids,
columns=pc_ids)
sample_scores = pd.DataFrame(sample_scores,
index=sample_ids,
columns=pc_ids)
# TODO not yet used/displayed
sample_constraints = pd.DataFrame(np.hstack((Z, F_res)) / scaling_factor,
index=sample_ids,
columns=pc_ids)
# Vegan seems to compute them as corr(X[:, :rank_X],
# u) but I don't think that's a good idea. In fact, if
# you take the example shown in Figure 11.3 in L&L 1998 you
# can see that there's an arrow for each of the 4
# environmental variables (depth, coral, sand, other) even if
# other = not(coral or sand)
biplot_scores = pd.DataFrame(corr(X, u))
# The "Correlations of environmental variables with sample
# scores" from table 11.4 are quite similar to vegan's biplot
# scores, but they're computed like this:
# corr(X, F))
p_explained = pd.Series(eigenvalues / eigenvalues.sum(), index=pc_ids)
return OrdinationResults('RDA', 'Redundancy Analysis',
eigvals=eigvals,
proportion_explained=p_explained,
features=feature_scores,
samples=sample_scores,
biplot_scores=biplot_scores,
sample_constraints=sample_constraints)
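# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A tiny, self-contained numerical check of the QR identity quoted in the
# comments above (Y_hat = Q Q' Y) against the ordinary least-squares fit.
# All names below are local to this demo and hypothetical.
def _qr_identity_demo():
    import numpy as _np
    rng = _np.random.RandomState(0)
    X_demo = rng.randn(10, 3)          # explanatory variables
    Y_demo = rng.randn(10, 4)          # response variables
    Xc = X_demo - X_demo.mean(axis=0)  # centred X
    Yc = Y_demo - Y_demo.mean(axis=0)  # centred Y
    Q, _ = _np.linalg.qr(Xc)
    B_demo = _np.linalg.lstsq(Xc, Yc, rcond=None)[0]
    # Both routes give the same fitted values when X has full column rank.
    assert _np.allclose(Q.dot(Q.T).dot(Yc), Xc.dot(B_demo))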
| bsd-3-clause |
jmanday/Master | TFM/scripts/comparison.py | 1 | 2086 | #########################################################################
### Jesus Garcia Manday
### segmentation.py
### @Description: script to segment the iris images and extract
### the iris texture using one of two possible
### methods, "caht" or "wahet"
#########################################################################
import os
import commands
import sys
import pandas as pd
PATH_EXECUTABLE = "/Users/jesusgarciamanday/Documents/Master/TFM/USITv1.0.3/"
PATH_DATABASES = "/Users/jesusgarciamanday/Documents/Master/TFM/databases/CASIA V4/"
PATH_DATABASES_2 = "/Users/jesusgarciamanday/Documents/Master/TFM/databases/CASIA\ V4/"
PATH_OUTPUTS = "/Users/jesusgarciamanday/Documents/Master/TFM/outputs/segmentation/"
PATH_OUTPUTS2 = "/Users/jesusgarciamanday/Documents/Master/TFM/outputs/feature extraction/"
def getImagesCASIAV4():
vnames_images = []
direc = os.chdir(PATH_DATABASES)
cmd = ("ls " + PATH_DATABASES_2)
res = commands.getstatusoutput(cmd)
if res[0] == 0:
vimages = res[1].split()
for img in vimages:
name_img = img.split('.')
if name_img[1] == 'jpg':
vnames_images.append(img)
else:
print ("Error:" + str(res[0]))
print ("Descripcion: " + res[1])
df = pd.DataFrame([[img, 0, 0, 0, 0, 0, 0, 0] for img in vnames_images], columns = ['image', 'lg', 'qsw', 'ko', 'cr', 'cb', 'dct', 'gfcf'])
df.to_csv(PATH_OUTPUTS2 + 'results.csv', index=False)
return vnames_images
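# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The function above shells out to `ls` through the deprecated, Python-2-only
# `commands` module. A portable alternative using only `os.listdir` (which also
# avoids the escaped-space workaround of PATH_DATABASES_2) could look like this
# hypothetical helper, not called anywhere in the script:
def list_jpgs_portable(path=PATH_DATABASES):
    return [name for name in os.listdir(path) if name.lower().endswith('.jpg')]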
if __name__ == "__main__":
#mode_seg = sys.argv[1]
images_CASIAV4 = getImagesCASIAV4()
#direc = os.chdir(PATH_EXECUTABLE)
#for img in images_CASIAV4:
# cmd = ("./" + mode_seg + " -i " + PATH_DATABASES_2 + img + " -o " + PATH_OUTPUTS + "/" + mode_seg + "/" + img.split('.')[0] + "-texture.png" + " -s 512 64 -e")
# res = commands.getstatusoutput(cmd)
# if(res[0] != 0):
# print ("Error:" + str(res[0]))
# print ("Descripcion: " + res[1])
| apache-2.0 |
pandagod/textclassifier | eval.py | 1 | 5935 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
import csv
import sys
import pandas as pd
from bs4 import BeautifulSoup
import MySQLdb
from langdetect import detect
import pickle
import logging
# Parameters
# ==================================================
# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the positive data.")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 128, "Batch Size (default: 64)")
#tf.flags.DEFINE_string("checkpoint_dir", "/Users/zqiao/PycharmProjects/cnn-text-classification-tf/runs/1492410542/checkpoints/", "Checkpoint directory from training run")
tf.flags.DEFINE_string("checkpoint_dir", "./runs/1520514294/checkpoints/", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
f = open('./y_target.pickle', 'rb')
lb = pickle.load(f)
# CHANGE THIS: Load data. Load your own data here
if FLAGS.eval_train:
file = open('./dev_data.pickle', 'rb')
x_test,y_test,x_id_test= pickle.load(file)
y_test = np.argmax(y_test,axis=1)
else:
x_raw=[]
y_test=[]
ids=[]
y_raw=[]
db = MySQLdb.connect("10.249.71.213", "root", "root", "ai")
cursor = db.cursor()
sql = "SELECT sr_number,l1_topic,l2_topic,body,first_l2_topic,subject FROM test01010110"
try:
cursor.execute(sql)
results = cursor.fetchall()
except:
sys.stdout.write("Error: unable to fecth data" + '\n')
db.close()
i=0
j=0
for row in results:
text = BeautifulSoup((row[3]).decode('utf8', 'ignore'), "html.parser").get_text()
try:
if text!='' and text!='NULL' and detect(text)=='en':
i =i+1
if row[2]==row[4]:
j=j+1
ids.append(row[0])
x_raw.append(data_helpers.clean_str(row[5]+" "+text))
y_raw.append(row[2])
except:
print row[0]
print("actual correct rate is ")
print (j/i)
print y_raw
print lb.transform(y_raw)
y_test = np.argmax(lb.transform(y_raw),axis=1)
print y_test
# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nEvaluating...\n")
# Evaluation
# ==================================================
print FLAGS.checkpoint_dir
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
#checkpoint_file = FLAGS.checkpoint_dir+'model-2900'
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
print y_test
print all_predictions
print lb.classes_
#print x_id_test
result = tf.contrib.metrics.confusion_matrix(y_test, all_predictions, len(lb.classes_)).eval()
correct_rate =[]
for index, row in enumerate(result):
correct_rate.append([row[index]/np.sum(row)])
data = np.hstack((result, correct_rate))
df = pd.DataFrame(data=data, index=lb.classes_, columns=np.concatenate((lb.classes_,["correct rate"])))
df.to_csv("confusion_matrix", sep=',', encoding='utf-8')
print df
# Print accuracy if y_test is defined
if y_test is not None:
correct_predictions = float(sum(all_predictions == y_test))
print("Total number of test examples: {}".format(len(y_test)))
print("Total number of predicted examples: {}".format(len(all_predictions)))
print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
# Save the evaluation to a csv
predictions_human_readable = np.column_stack((np.array(ids), np.array(y_test),[lb.classes_[int(label)] for label in all_predictions]))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
csv.writer(f).writerows(predictions_human_readable) | apache-2.0 |
balazssimon/ml-playground | udemy/Machine Learning A-Z/Part 2 - Regression/regression_template.py | 1 | 1457 | # Regression Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting the Regression Model to the dataset
# Create your regressor here
# Predicting a new result
y_pred = regressor.predict(X_test)
# Visualising the Regression results
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | apache-2.0 |
fspaolo/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 9 | 2694 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
"""Test delayed input validation in fit (useful for grid search)."""
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
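# --- Illustrative sketch (added for context; not part of the original tests) ---
# The minimal FeatureHasher workflow exercised by the tests above, for reference:
def _demo_feature_hasher():
    h = FeatureHasher(n_features=8, input_type="string")
    X = h.transform([["cat", "dog"], ["dog", "fish"]])
    return X.toarray()  # dense (2, 8) array of signed, hashed token counts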
| bsd-3-clause |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tseries/tests/test_tslib.py | 9 | 44977 | import nose
from distutils.version import LooseVersion
import numpy as np
from pandas import tslib
import pandas._period as period
import datetime
from pandas.core.api import Timestamp, Series, Timedelta, Period, to_datetime
from pandas.tslib import get_timezone
from pandas._period import period_asfreq, period_ordinal
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.frequencies import get_freq
import pandas.tseries.tools as tools
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
import pandas.compat as compat
from pandas.util.testing import assert_series_equal
import pandas.compat as compat
class TestTimestamp(tm.TestCase):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime.datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime.datetime(2014, 7, 1, 9, 0, 0, 8), base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'), base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),
('Asia/Tokyo', 9), ('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# re-creation shouldn't affect to internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000, base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00', base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0),
('Asia/Tokyo', 9), ('US/Eastern', -4),
('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# re-creation shouldn't affect to internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
# This should be 2013-11-01 05:00 in UTC -> converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected_repr = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')"
self.assertEqual(repr(result), expected_repr)
self.assertEqual(result, eval(repr(result)))
# This should be 2013-11-01 05:00 in UTC -> converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected_repr = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
self.assertEqual(repr(result), expected_repr)
self.assertEqual(result, eval(repr(result)))
def test_constructor_invalid(self):
with tm.assertRaisesRegexp(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assertRaisesRegexp(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_conversion(self):
# GH 9255
ts = Timestamp('2000-01-01')
result = ts.to_pydatetime()
expected = datetime.datetime(2000, 1, 1)
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
result = ts.to_datetime64()
expected = np.datetime64(ts.value, 'ns')
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
self.assertEqual(result.dtype, expected.dtype)
def test_repr(self):
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
dates = ['2014-03-07', '2014-01-01 09:00', '2014-01-01 00:00:00.000000001']
# dateutil zone change (only matters for repr)
import dateutil
if dateutil.__version__ >= LooseVersion('2.3') and dateutil.__version__ <= LooseVersion('2.4.0'):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
else:
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/America/Los_Angeles']
freqs = ['D', 'M', 'S', 'N']
for date in dates:
for tz in timezones:
for freq in freqs:
# avoid to match with timezone name
freq_repr = "'{0}'".format(freq)
if tz.startswith('dateutil'):
tz_repr = tz.replace('dateutil', '')
else:
tz_repr = tz
date_only = Timestamp(date)
self.assertIn(date, repr(date_only))
self.assertNotIn(tz_repr, repr(date_only))
self.assertNotIn(freq_repr, repr(date_only))
self.assertEqual(date_only, eval(repr(date_only)))
date_tz = Timestamp(date, tz=tz)
self.assertIn(date, repr(date_tz))
self.assertIn(tz_repr, repr(date_tz))
self.assertNotIn(freq_repr, repr(date_tz))
self.assertEqual(date_tz, eval(repr(date_tz)))
date_freq = Timestamp(date, offset=freq)
self.assertIn(date, repr(date_freq))
self.assertNotIn(tz_repr, repr(date_freq))
self.assertIn(freq_repr, repr(date_freq))
self.assertEqual(date_freq, eval(repr(date_freq)))
date_tz_freq = Timestamp(date, tz=tz, offset=freq)
self.assertIn(date, repr(date_tz_freq))
self.assertIn(tz_repr, repr(date_tz_freq))
self.assertIn(freq_repr, repr(date_tz_freq))
self.assertEqual(date_tz_freq, eval(repr(date_tz_freq)))
# this can cause the tz field to be populated, but it's redundant with the information in the datestring
tm._skip_if_no_pytz()
import pytz
date_with_utc_offset = Timestamp('2014-03-13 00:00:00-0400', tz=None)
self.assertIn('2014-03-13 00:00:00-0400', repr(date_with_utc_offset))
self.assertNotIn('tzoffset', repr(date_with_utc_offset))
self.assertIn('pytz.FixedOffset(-240)', repr(date_with_utc_offset))
expr = repr(date_with_utc_offset).replace("'pytz.FixedOffset(-240)'",
'pytz.FixedOffset(-240)')
self.assertEqual(date_with_utc_offset, eval(expr))
def test_bounds_with_different_units(self):
out_of_bounds_dates = (
'1677-09-21',
'2262-04-12',
)
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
self.assertRaises(
ValueError,
Timestamp,
np.datetime64(date_string, dtype='M8[%s]' % unit)
)
in_bounds_dates = (
'1677-09-23',
'2262-04-11',
)
for date_string in in_bounds_dates:
for unit in time_units:
Timestamp(
np.datetime64(date_string, dtype='M8[%s]' % unit)
)
def test_tz(self):
t = '2014-02-01 09:00'
ts = Timestamp(t)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local, Timestamp(t, tz='Asia/Tokyo'))
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv,
Timestamp('2014-01-31 19:00', tz='US/Eastern'))
self.assertEqual(conv.hour, 19)
# preserves nanosecond
ts = Timestamp(t) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local.nanosecond, 5)
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv.nanosecond, 5)
self.assertEqual(conv.hour, 19)
def test_tz_localize_ambiguous(self):
ts = Timestamp('2014-11-02 01:00')
ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern')
self.assertEqual(rng[1], ts_dst)
self.assertEqual(rng[2], ts_no_dst)
self.assertRaises(ValueError, ts.tz_localize, 'US/Eastern', ambiguous='infer')
# GH 8025
with tm.assertRaisesRegexp(TypeError, 'Cannot localize tz-aware Timestamp, use '
'tz_convert for conversions'):
Timestamp('2011-01-01' ,tz='US/Eastern').tz_localize('Asia/Tokyo')
with tm.assertRaisesRegexp(TypeError, 'Cannot convert tz-naive Timestamp, use '
'tz_localize to localize'):
Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
def test_tz_localize_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00',
'2014-11-05 00:00']:
ts = Timestamp(t)
localized = ts.tz_localize(tz)
self.assertEqual(localized, Timestamp(t, tz=tz))
with tm.assertRaises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
self.assertEqual(reset, ts)
self.assertTrue(reset.tzinfo is None)
def test_tz_convert_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00', '2014-11-01 17:00',
'2014-11-05 00:00']:
ts = Timestamp(t, tz='UTC')
converted = ts.tz_convert(tz)
reset = converted.tz_convert(None)
self.assertEqual(reset, Timestamp(t))
self.assertTrue(reset.tzinfo is None)
self.assertEqual(reset, converted.tz_convert('UTC').tz_localize(None))
def test_barely_oob_dts(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
self.assertRaises(ValueError, Timestamp, min_ts_us - one_us)
# One us more than the maximum is an error
self.assertRaises(ValueError, Timestamp, max_ts_us + one_us)
def test_utc_z_designator(self):
self.assertEqual(get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo), 'UTC')
def test_now(self):
# #9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None)
- ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None)
- ts_from_method_tz.tz_localize(None)) < delta)
def test_fields(self):
def check(value, equal):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
self.assertEqual(value, equal)
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
self.assertRaises(AttributeError, lambda : ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.days_in_month, 31)
def test_nat_fields(self):
# GH 10050
ts = Timestamp('NaT')
self.assertTrue(np.isnan(ts.year))
self.assertTrue(np.isnan(ts.month))
self.assertTrue(np.isnan(ts.day))
self.assertTrue(np.isnan(ts.hour))
self.assertTrue(np.isnan(ts.minute))
self.assertTrue(np.isnan(ts.second))
self.assertTrue(np.isnan(ts.microsecond))
self.assertTrue(np.isnan(ts.nanosecond))
self.assertTrue(np.isnan(ts.dayofweek))
self.assertTrue(np.isnan(ts.quarter))
self.assertTrue(np.isnan(ts.dayofyear))
self.assertTrue(np.isnan(ts.week))
self.assertTrue(np.isnan(ts.daysinmonth))
self.assertTrue(np.isnan(ts.days_in_month))
class TestDatetimeParsingWrappers(tm.TestCase):
def test_does_not_convert_mixed_integer(self):
bad_date_strings = (
'-50000',
'999',
'123.1234',
'm',
'T'
)
for bad_date_string in bad_date_strings:
self.assertFalse(
tslib._does_string_look_like_datetime(bad_date_string)
)
good_date_strings = (
'2012-01-01',
'01/01/2012',
'Mon Sep 16, 2013',
'01012012',
'0101',
'1-1',
)
for good_date_string in good_date_strings:
self.assertTrue(
tslib._does_string_look_like_datetime(good_date_string)
)
def test_parsers(self):
cases = {'2011-01-01': datetime.datetime(2011, 1, 1),
'2Q2005': datetime.datetime(2005, 4, 1),
'2Q05': datetime.datetime(2005, 4, 1),
'2005Q1': datetime.datetime(2005, 1, 1),
'05Q1': datetime.datetime(2005, 1, 1),
'2011Q3': datetime.datetime(2011, 7, 1),
'11Q3': datetime.datetime(2011, 7, 1),
'3Q2011': datetime.datetime(2011, 7, 1),
'3Q11': datetime.datetime(2011, 7, 1),
# quarterly without space
'2000Q4': datetime.datetime(2000, 10, 1),
'00Q4': datetime.datetime(2000, 10, 1),
'4Q2000': datetime.datetime(2000, 10, 1),
'4Q00': datetime.datetime(2000, 10, 1),
'2000q4': datetime.datetime(2000, 10, 1),
'2000-Q4': datetime.datetime(2000, 10, 1),
'00-Q4': datetime.datetime(2000, 10, 1),
'4Q-2000': datetime.datetime(2000, 10, 1),
'4Q-00': datetime.datetime(2000, 10, 1),
'2000q4': datetime.datetime(2000, 10, 1),
'00q4': datetime.datetime(2000, 10, 1),
'2005': datetime.datetime(2005, 1, 1),
'2005-11': datetime.datetime(2005, 11, 1),
'2005 11': datetime.datetime(2005, 11, 1),
'11-2005': datetime.datetime(2005, 11, 1),
'11 2005': datetime.datetime(2005, 11, 1),
'200511': datetime.datetime(2020, 5, 11),
'20051109': datetime.datetime(2005, 11, 9),
'20051109 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'20051109 08H': datetime.datetime(2005, 11, 9, 8, 0),
'2005-11-09 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'2005-11-09 08H': datetime.datetime(2005, 11, 9, 8, 0),
'2005/11/09 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'2005/11/09 08H': datetime.datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime.datetime(2003, 9, 25, 10, 36, 28),
"Thu Sep 25 2003": datetime.datetime(2003, 9, 25),
"Sep 25 2003": datetime.datetime(2003, 9, 25),
"January 1 2014": datetime.datetime(2014, 1, 1),
# GH 10537
'2014-06': datetime.datetime(2014, 6, 1),
'06-2014': datetime.datetime(2014, 6, 1),
'2014-6': datetime.datetime(2014, 6, 1),
'6-2014': datetime.datetime(2014, 6, 1),
}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = to_datetime(np.array([date_str], dtype=object))
result5 = Timestamp(date_str)
result6 = DatetimeIndex([date_str])[0]
result7 = date_range(date_str, freq='S', periods=1)
self.assertEqual(result1, expected)
self.assertEqual(result2, expected)
self.assertEqual(result3, expected)
self.assertEqual(result4, expected)
self.assertEqual(result5, expected)
self.assertEqual(result6, expected)
self.assertEqual(result7, expected)
# NaT
result1, _, _ = tools.parse_time_string('NaT')
result2 = to_datetime('NaT')
result3 = Timestamp('NaT')
result4 = DatetimeIndex(['NaT'])[0]
self.assertTrue(result1 is tslib.NaT)
self.assertTrue(result2 is tslib.NaT)
self.assertTrue(result3 is tslib.NaT)
self.assertTrue(result4 is tslib.NaT)
def test_parsers_quarter_invalid(self):
cases = ['2Q 2005', '2Q-200A', '2Q-200',
'22Q2005', '6Q-20', '2Q200.']
for case in cases:
self.assertRaises(ValueError, tools.parse_time_string, case)
def test_parsers_dayfirst_yearfirst(self):
# str : dayfirst, yearfirst, expected
cases = {'10-11-12': [(False, False, datetime.datetime(2012, 10, 11)),
(True, False, datetime.datetime(2012, 11, 10)),
(False, True, datetime.datetime(2010, 11, 12)),
(True, True, datetime.datetime(2010, 11, 12))],
'20/12/21': [(False, False, datetime.datetime(2021, 12, 20)),
(True, False, datetime.datetime(2021, 12, 20)),
(False, True, datetime.datetime(2020, 12, 21)),
(True, True, datetime.datetime(2020, 12, 21))]}
tm._skip_if_no_dateutil()
from dateutil.parser import parse
for date_str, values in compat.iteritems(cases):
for dayfirst, yearfirst ,expected in values:
result1, _, _ = tools.parse_time_string(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
result2 = to_datetime(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
result3 = DatetimeIndex([date_str], dayfirst=dayfirst,
yearfirst=yearfirst)[0]
# Timestamp doesn't support dayfirst and yearfirst
self.assertEqual(result1, expected)
self.assertEqual(result2, expected)
self.assertEqual(result3, expected)
# compare with dateutil result
dateutil_result = parse(date_str, dayfirst=dayfirst, yearfirst=yearfirst)
self.assertEqual(dateutil_result, expected)
def test_parsers_timestring(self):
tm._skip_if_no_dateutil()
from dateutil.parser import parse
# must be the same as dateutil result
cases = {'10:15': (parse('10:15'), datetime.datetime(1, 1, 1, 10, 15)),
'9:05': (parse('9:05'), datetime.datetime(1, 1, 1, 9, 5)) }
for date_str, (exp_now, exp_def) in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
# parse_time_string returns the time on the default date, while the
# others use the current date; this cannot be changed because it is
# relied upon by time series plotting
self.assertEqual(result1, exp_def)
self.assertEqual(result2, exp_now)
self.assertEqual(result3, exp_now)
self.assertEqual(result4, exp_now)
self.assertEqual(result5, exp_now)
def test_parsers_monthfreq(self):
cases = {'201101': datetime.datetime(2011, 1, 1, 0, 0),
'200005': datetime.datetime(2000, 5, 1, 0, 0)}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str, freq='M')
result2 = tools._to_datetime(date_str, freq='M')
self.assertEqual(result1, expected)
self.assertEqual(result2, expected)
def test_parsers_quarterly_with_freq(self):
msg = 'Incorrect quarterly string is given, quarter must be between 1 and 4: 2013Q5'
with tm.assertRaisesRegexp(tslib.DateParseError, msg):
tools.parse_time_string('2013Q5')
# GH 5418
msg = 'Unable to retrieve month information from given freq: INVLD-L-DEC-SAT'
with tm.assertRaisesRegexp(tslib.DateParseError, msg):
tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT')
cases = {('2013Q2', None): datetime.datetime(2013, 4, 1),
('2013Q2', 'A-APR'): datetime.datetime(2012, 8, 1),
('2013-Q2', 'A-DEC'): datetime.datetime(2013, 4, 1)}
for (date_str, freq), exp in compat.iteritems(cases):
result, _, _ = tools.parse_time_string(date_str, freq=freq)
self.assertEqual(result, exp)
class TestArrayToDatetime(tm.TestCase):
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np.array(
[
'2013-01-01T00:00:00.000000000-0000',
'2013-01-02T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np.array(
[
'2013-09-16T00:00:00.000000000-0000',
'2013-09-17T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_number_looking_strings_not_into_datetime(self):
# #4601
# These strings don't look like datetimes, so they shouldn't be
# converted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
self.assert_numpy_array_equal(tslib.array_to_datetime(arr, errors='ignore'), arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
self.assert_numpy_array_equal(tslib.array_to_datetime(arr, errors='ignore'), arr)
def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
invalid_dates = [
datetime.date(1000, 1, 1),
datetime.datetime(1000, 1, 1),
'1000-01-01',
'Jan 1, 1000',
np.datetime64('1000-01-01'),
]
for invalid_date in invalid_dates:
self.assertRaises(
ValueError,
tslib.array_to_datetime,
np.array([invalid_date], dtype='object'),
errors='raise',
)
self.assertTrue(
np.array_equal(
tslib.array_to_datetime(
np.array([invalid_date], dtype='object'), errors='coerce',
),
np.array([tslib.iNaT], dtype='M8[ns]')
)
)
arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np.array(
[
tslib.iNaT,
'2000-01-01T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_coerce_of_invalid_datetimes(self):
arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
# Without coercing, the presence of any invalid dates prevents
# any values from being converted
self.assert_numpy_array_equal(tslib.array_to_datetime(arr,errors='ignore'), arr)
# With coercing, the invalid dates become iNaT
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np.array(
[
'2013-01-01T00:00:00.000000000-0000',
tslib.iNaT,
tslib.iNaT
],
dtype='M8[ns]'
)
)
def test_parsing_timezone_offsets(self):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added
dt_strings = [
'01-01-2013 08:00:00+08:00',
'2013-01-01T08:00:00.000000000+0800',
'2012-12-31T16:00:00.000000000-0800',
'12-31-2012 23:00:00-01:00',
]
expected_output = tslib.array_to_datetime(
np.array(['01-01-2013 00:00:00'], dtype=object)
)
for dt_string in dt_strings:
self.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([dt_string], dtype=object)
),
expected_output
)
class TestTimestampNsOperations(tm.TestCase):
def setUp(self):
self.timestamp = Timestamp(datetime.datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
self.assertEqual(modified_value - value, expected_value)
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'), -123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'), -123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
self.assertEqual(ts.value, expected_value - 9 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
self.assertEqual(ts.value, expected_value + 4 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000005Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
expected = 1293840000000000010
t = t + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(np.datetime64('2011-01-01 00:00:00.000000010Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
def test_nat_arithmetic(self):
# GH 6873
nat = tslib.NaT
t = Timestamp('2014-01-01')
dt = datetime.datetime(2014, 1, 1)
delta = datetime.timedelta(3600)
# Timestamp / datetime
for (left, right) in [(nat, nat), (nat, t), (dt, nat)]:
# NaT + Timestamp-like should raise TypeError
with tm.assertRaises(TypeError):
left + right
with tm.assertRaises(TypeError):
right + left
# NaT - Timestamp-like (or inverse) returns NaT
self.assertTrue((left - right) is tslib.NaT)
self.assertTrue((right - left) is tslib.NaT)
# timedelta-like
# offsets are tested in test_offsets.py
for (left, right) in [(nat, delta)]:
# NaT + timedelta-like returns NaT
self.assertTrue((left + right) is tslib.NaT)
# timedelta-like + NaT should raise TypeError
with tm.assertRaises(TypeError):
right + left
self.assertTrue((left - right) is tslib.NaT)
with tm.assertRaises(TypeError):
right - left
class TestTslib(tm.TestCase):
def test_intraday_conversion_factors(self):
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('H'), False), 24)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('T'), False), 1440)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('S'), False), 86400)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('L'), False), 86400000)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('U'), False), 86400000000)
self.assertEqual(period_asfreq(1, get_freq('D'), get_freq('N'), False), 86400000000000)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('T'), False), 60)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('S'), False), 3600)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('L'), False), 3600000)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('U'), False), 3600000000)
self.assertEqual(period_asfreq(1, get_freq('H'), get_freq('N'), False), 3600000000000)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('S'), False), 60)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('L'), False), 60000)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('U'), False), 60000000)
self.assertEqual(period_asfreq(1, get_freq('T'), get_freq('N'), False), 60000000000)
self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('L'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('U'), False), 1000000)
self.assertEqual(period_asfreq(1, get_freq('S'), get_freq('N'), False), 1000000000)
self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('U'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('L'), get_freq('N'), False), 1000000)
self.assertEqual(period_asfreq(1, get_freq('U'), get_freq('N'), False), 1000)
def test_period_ordinal_start_values(self):
# information for 1.1.1970
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('A')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M')))
self.assertEqual(1, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('B')))
def test_period_ordinal_week(self):
self.assertEqual(1, period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(2, period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(2284, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('W')))
self.assertEqual(2285, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('W')))
def test_period_ordinal_business_day(self):
# Thursday
self.assertEqual(11415, period_ordinal(2013, 10, 3, 0, 0, 0, 0, 0, get_freq('B')))
# Friday
self.assertEqual(11416, period_ordinal(2013, 10, 4, 0, 0, 0, 0, 0, get_freq('B')))
# Saturday
self.assertEqual(11417, period_ordinal(2013, 10, 5, 0, 0, 0, 0, 0, get_freq('B')))
# Sunday
self.assertEqual(11417, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('B')))
# Monday
self.assertEqual(11417, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('B')))
# Tuesday
self.assertEqual(11418, period_ordinal(2013, 10, 8, 0, 0, 0, 0, 0, get_freq('B')))
def test_tslib_tz_convert(self):
def compare_utc_to_local(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)
result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')
result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:
# US: 2014-03-09 - 2014-11-11
# MOSCOW: 2014-10-26 / 2014-12-31
tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)
utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')
compare_utc_to_local(tz_didx, utc_didx)
# local tz to UTC can differ at hourly (or higher) freqs because of DST
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz)
utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)
utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
# Check empty array
result = tslib.tz_convert(np.array([], dtype=np.int64),
tslib.maybe_get_tz('US/Eastern'),
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
# Check all-NaT array
result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
tslib.maybe_get_tz('US/Eastern'),
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array([tslib.iNaT], dtype=np.int64))
def test_shift_months(self):
s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'), Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'), Timestamp('2000-02-29'), Timestamp('2000-12-31')])
for years in [-1, 0, 1]:
for months in [-2, 0, 2]:
actual = DatetimeIndex(tslib.shift_months(s.asi8, years * 12 + months))
expected = DatetimeIndex([x + offsets.DateOffset(years=years, months=months) for x in s])
tm.assert_index_equal(actual, expected)
class TestTimestampOps(tm.TestCase):
def test_timestamp_and_datetime(self):
self.assertEqual((Timestamp(datetime.datetime(2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1)
self.assertEqual((datetime.datetime(2013, 10, 12) - Timestamp(datetime.datetime(2013, 10, 13))).days, -1)
def test_timestamp_and_series(self):
timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', tz='US/Eastern'))
first_timestamp = timestamp_series[0]
delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])
assert_series_equal(timestamp_series - first_timestamp, delta_series)
assert_series_equal(first_timestamp - timestamp_series, -delta_series)
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time objects
datetime_instance = datetime.datetime(2014, 3, 4)
timedelta_instance = datetime.timedelta(seconds=1)
# build a timestamp with a frequency, since then it supports addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq='D')[0]
self.assertEqual(type(timestamp_instance + 1), Timestamp)
self.assertEqual(type(timestamp_instance - 1), Timestamp)
# Timestamp + datetime not supported, though subtraction is supported and yields timedelta
# more tests in tseries/base/tests/test_base.py
self.assertEqual(type(timestamp_instance - datetime_instance), Timedelta)
self.assertEqual(type(timestamp_instance + timedelta_instance), Timestamp)
self.assertEqual(type(timestamp_instance - timedelta_instance), Timestamp)
# Timestamp +/- datetime64 not supported, so not tested (could possibly assert error raised?)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual(type(timestamp_instance + timedelta64_instance), Timestamp)
self.assertEqual(type(timestamp_instance - timedelta64_instance), Timestamp)
def test_addition_subtraction_preserve_frequency(self):
timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0]
timedelta_instance = datetime.timedelta(days=1)
original_freq = timestamp_instance.freq
self.assertEqual((timestamp_instance + 1).freq, original_freq)
self.assertEqual((timestamp_instance - 1).freq, original_freq)
self.assertEqual((timestamp_instance + timedelta_instance).freq, original_freq)
self.assertEqual((timestamp_instance - timedelta_instance).freq, original_freq)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual((timestamp_instance + timedelta64_instance).freq, original_freq)
self.assertEqual((timestamp_instance - timedelta64_instance).freq, original_freq)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
[period.D_RESO, period.D_RESO, period.D_RESO, period.D_RESO,
period.H_RESO, period.T_RESO, period.S_RESO, period.MS_RESO, period.US_RESO]):
for tz in [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Eastern']:
idx = date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
result = period.resolution(idx.asi8, idx.tz)
self.assertEqual(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
djgagne/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you a measure of confidence in the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example
illustrates two different methods for this calibration and evaluates the
quality of the returned probabilities using the Brier score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probabilities of a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is
able to provide a probability calibration that returns probabilities close to
the expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
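# --- Illustrative sketch (added for clarity; not part of the original example) ---
# The n_bins constant defined near the top refers to
# sklearn.calibration.calibration_curve, which this script never actually
# calls. A reliability diagram for the isotonic model could be added like so:
from sklearn.calibration import calibration_curve
frac_pos, mean_pred = calibration_curve(y_test, prob_pos_isotonic, n_bins=n_bins)
plt.figure()
plt.plot(mean_pred, frac_pos, 'gs-', label='Isotonic calibration')
plt.plot([0, 1], [0, 1], 'k:', label='Perfectly calibrated')
plt.xlabel("Mean predicted probability per bin")
plt.ylabel("Fraction of positives per bin")
plt.legend(loc="best")
plt.title("Reliability diagram (illustrative addition)")
plt.show()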
| bsd-3-clause |