repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
architecture-building-systems/CEAforArcGIS | cea/utilities/rename_building.py | 2 | 1817 | """
A simple CEA script that renames a building in the input files. NOTE: you'll have to re-run the simulation and
analysis scripts to pick up the change, as only the files defined in ``inputs.yml`` (the files you see in the CEA
Dashboard input editor) are updated.
This is the script behind ``cea rename-building --old <building> --new <building>``
"""
import os
import cea.config
import cea.inputlocator
from cea.interfaces.dashboard.api.inputs import get_input_database_schemas
import yaml
import geopandas
import cea.utilities.dbf as dbf
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def main(config):
old_building_name = config.rename_building.old
new_building_name = config.rename_building.new
if not new_building_name.strip():
print("Please specify a new name for the building.")
return
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
for input_name, input_schema in get_input_database_schemas().items():
# checked, this is true for all input tables.
location = input_schema["location"]
schemas_io = getattr(locator, location)
file_path = schemas_io()
if not os.path.exists(file_path):
print("Skipping input file {file_path} (not found)".format(file_path=file_path))
continue
print("Processing input file {file_path}".format(file_path=file_path))
df = schemas_io.read()
df.loc[df["Name"] == old_building_name, "Name"] = new_building_name
schemas_io.write(df)
if __name__ == '__main__':
main(cea.config.Configuration()) | mit |
wwf5067/statsmodels | statsmodels/tsa/base/tests/test_base.py | 27 | 2106 | import numpy as np
from pandas import Series
from pandas import date_range
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
def test_pandas_nodates_index():
from statsmodels.datasets import sunspots
y = sunspots.load_pandas().data.SUNACTIVITY
npt.assert_raises(ValueError, TimeSeriesModel, y)
def test_predict_freq():
# test that predicted dates have same frequency
x = np.arange(1,36.)
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
#npt.assert_(model.data.freq == "AS-APR")
npt.assert_(model.data.freq == "A-APR")
start = model._get_predict_start("2006-4-30")
end = model._get_predict_end("2016-4-30")
model._make_predict_dates()
predict_dates = model.data.predict_dates
#expected_dates = date_range("2006-12-31", "2016-12-31",
# freq="AS-APR")
expected_dates = date_range("2006-4-30", "2016-4-30", freq="A-APR")
assert_equal(predict_dates, expected_dates)
#ptesting.assert_series_equal(predict_dates, expected_dates)
def test_keyerror_start_date():
x = np.arange(1,36.)
from pandas import date_range
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
npt.assert_raises(ValueError, model._get_predict_start, "1970-4-30")
def test_period_index():
# test 1285
from pandas import PeriodIndex, TimeSeries
dates = PeriodIndex(start="1/1/1990", periods=20, freq="M")
x = np.arange(1, 21.)
model = TimeSeriesModel(Series(x, index=dates))
npt.assert_(model.data.freq == "M")
model = TimeSeriesModel(TimeSeries(x, index=dates))
npt.assert_(model.data.freq == "M")
| bsd-3-clause |
DonghoChoi/ISB_Project | local/gps_to_location_list.py | 2 | 10362 | #!/usr/bin/python
# Author: Dongho Choi
import os.path
import datetime
import math
import time
import sys
import itertools
import pandas as pd
import numpy as np
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
from geopy.distance import vincenty
import sys
sys.path.append("./configs/")
#sys.path.append("/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/Code/local/configs/")
import server_config # (1) info2_server (2) exploration_db
import matplotlib.pyplot as plt
import seaborn as sns
datetimeFormat = '%Y-%m-%d %H:%M:%S'
close_distance_cut = 40
def is_location_close(location_a, location_b):
#print("Distance:", vincenty(location_a, location_b).meters)
if (vincenty(location_a, location_b).meters <= close_distance_cut):
return True
else:
return False
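# Rough sanity check (hypothetical coordinates, not study data): two points that
# differ by 0.0003 degrees of latitude are about 33 m apart, so is_location_close()
# treats them as the same place under the 40 m cutoff, while a 0.0005 degree
# difference (about 56 m) counts as a different place.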
def find_location(current_location, df_user_location_list): # Return -1 when no close location exists, otherwise the matching locationID
for i in range(0, len(df_user_location_list)):
#print("index in df_location_list", i)
temp_location = (df_user_location_list.iloc[i]['latitude'],df_user_location_list.iloc[i]['longitude'])
if (is_location_close(current_location, temp_location) == True):
#print("FOUND ONE CLOSE LOCATION")
return df_user_location_list.iloc[i]['locationID']
#print("No match, returning -1")
return -1
def get_center_of_mass(user_location_list):
x = 0
y = 0
visit_sum = user_location_list['visit_times'].sum()
for i in range(0,len(user_location_list)):
x = x + user_location_list.iloc[i]['visit_times'] * user_location_list.iloc[i]['latitude']
y = y + user_location_list.iloc[i]['visit_times'] * user_location_list.iloc[i]['longitude']
x = x/visit_sum
y = y/visit_sum
return [x, y]
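# Worked example for get_center_of_mass (made-up numbers): locations visited
# 3 and 1 times at (35.0, -79.0) and (36.0, -80.0) give a visit-weighted center
# of mass of ((3*35.0 + 1*36.0)/4, (3*(-79.0) + 1*(-80.0))/4) = (35.25, -79.25).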
if __name__ == "__main__":
# READ DATA FROM SERVER
#read_Data_from_Server()
# Server connection
server = SSHTunnelForwarder(
(server_config.info2_server['host'], 22),
ssh_username=server_config.info2_server['user'],
ssh_password=server_config.info2_server['password'],
remote_bind_address=('127.0.0.1', 3306))
server.start()
connection = pymysql.connect(host='127.0.0.1',
port=server.local_bind_port,
user=server_config.exploration_db['user'],
password=server_config.exploration_db['password'],
db=server_config.exploration_db['database'])
connection.autocommit(True)
cursor = connection.cursor()
print("MySQL connection established.")
# Get individual data
df_individual_data = pd.read_sql('SELECT * FROM individual_data', con=connection)
print("Individual data READ")
# Get the participants list from the table of 'final_participants'
df_participants = pd.read_sql('SELECT * FROM final_participants', con=connection)
print("Participants Table READ")
# Get locations_all table: importing all locations that participants visited.
df_locations_all = pd.read_sql(
"SELECT userID,date_time,latitude,longitude,timestamp FROM locations_all WHERE (userID!=5001)",
con=connection)
print("Locations Table READ")
# READ AND FILL THE PARTICIPANTS LIST WITH COMBINATIONS
participants_list = df_participants['userID'].tolist()
num_participants = len(participants_list) # number of participants
print('number of participants:{0}'.format(num_participants))
## POPULATE VISITS OF PARTICIPANTS
for i in range(0, num_participants-1):
#for i in range(0,1):
df_user_visits = pd.DataFrame(columns=('userID', 'visit_start', 'visit_end', 'dwellTime', 'latitude', 'longitude'))
current_userID = participants_list[i]
df_temp_locations = df_locations_all.loc[df_locations_all['userID'] == current_userID] # location list of a particular user
df_temp_locations = df_temp_locations.sort_values(by='timestamp')
current_location = (df_temp_locations.iloc[0]['latitude'],df_temp_locations.iloc[0]['longitude']) # the first line of the list
visit_start = datetime.datetime.strptime(df_temp_locations.iloc[0]['date_time'],datetimeFormat)
#print("visit_start:",visit_start)
visit_end = datetime.datetime.strptime(df_temp_locations.iloc[0]['date_time'], datetimeFormat)
#print("visit_end:",visit_end)
for j in range(1,len(df_temp_locations)-1):
if (visit_start + datetime.timedelta(minutes=55) > datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'], datetimeFormat)): # when the time interval to the next record is too small
#print("too close")
continue
else:
temp_location = (df_temp_locations.iloc[j]['latitude'],df_temp_locations.iloc[j]['longitude'])
#print("distance:",vincenty(current_location, temp_location).meters)
if (vincenty(current_location, temp_location).meters <= close_distance_cut): # the user appears to have stayed nearby
#print("SAME LOCATION")
visit_end = datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'],datetimeFormat)
#print("visit_end update:",visit_end)
else:
#print("MOVED TO NEW LOCATION")
df_temp_visits = pd.DataFrame(columns=('userID', 'visit_start', 'visit_end', 'dwellTime', 'latitude', 'longitude'))
df_temp_visits.set_value(0,'userID', current_userID)
df_temp_visits.set_value(0,'visit_start', visit_start)
df_temp_visits.set_value(0,'visit_end', visit_end)
df_temp_visits.set_value(0,'dwellTime', visit_end-visit_start)
df_temp_visits.set_value(0,'latitude', current_location[0])
df_temp_visits.set_value(0,'longitude', current_location[1])
df_user_visits = df_user_visits.append(df_temp_visits)
current_location = (df_temp_locations.iloc[j]['latitude'], df_temp_locations.iloc[j]['longitude'])
visit_start = datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'],
datetimeFormat)
visit_end = datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'], datetimeFormat)
#print(df_user_visits)
df_user_location_list = pd.DataFrame(columns=('userID','locationID','latitude','longitude','visit_times','spent_time'))
df_temp_location = pd.DataFrame(columns=('userID', 'locationID', 'latitude', 'longitude'))
df_temp_location.set_value(0, 'userID', current_userID)
df_temp_location.set_value(0, 'locationID', 0)
df_temp_location.set_value(0, 'latitude', df_user_visits.iloc[0]['latitude'])
df_temp_location.set_value(0, 'longitude', df_user_visits.iloc[0]['longitude'])
df_temp_location.set_value(0, 'visit_times', 1)
df_temp_location.set_value(0, 'spent_time', df_user_visits.iloc[0]['dwellTime'])
df_user_location_list = df_user_location_list.append(df_temp_location)
for k in range(1,len(df_user_visits)): # To populate the user's location list with visit_times and spent_time
current_location = (df_user_visits.iloc[k]['latitude'],df_user_visits.iloc[k]['longitude'])
same_location = find_location(current_location,df_user_location_list)
#print("value of same location:",same_location)
if (same_location == -1): # if there is no place close to the current place
#print("same_location = -1")
df_temp_location = pd.DataFrame(columns=('userID','locationID','latitude','longitude'))
df_temp_location.set_value(0, 'userID', current_userID)
df_temp_location.set_value(0, 'locationID', len(df_user_location_list))
df_temp_location.set_value(0, 'latitude', current_location[0])
df_temp_location.set_value(0, 'longitude', current_location[1])
df_temp_location.set_value(0, 'visit_times', 1)
df_temp_location.set_value(0, 'spent_time', df_user_visits.iloc[k]['dwellTime'])
df_user_location_list = df_user_location_list.append(df_temp_location)
else: # when current location can be perceived as the found 'same location'
#print("same_location = :",same_location)
val_visit_times = df_user_location_list.iloc[same_location]['visit_times']
val_spent_time = df_user_location_list.iloc[same_location]['spent_time']
#print("previous visit_times of locationID {0}: {1} becomes to {2}".format(same_location, val_visit_times, val_visit_times + 1))
df_user_location_list.iloc[same_location, df_user_location_list.columns.get_loc('visit_times')]= val_visit_times + 1
df_user_location_list.iloc[same_location, df_user_location_list.columns.get_loc('spent_time')] = val_spent_time + df_user_visits.iloc[k]['dwellTime']
#print(df_user_location_list)
# Calculating the location diversity and loyalty
print("total visits in df_user_location_list:",len(df_user_location_list))
df_user_location_list = (df_user_location_list.loc[df_user_location_list['spent_time'] > datetime.timedelta(seconds=0)]).sort_values(by='visit_times', ascending=False)
for i in range(0, len(df_user_location_list)):
#spent_in_seconds = (df_user_location_list.iloc[i]['spent_time']).total_seconds()
sql = "INSERT INTO user_location_list (userID,locationID,latitude,longitude,visit_times,spent_time) VALUES (" + str(
df_user_location_list.iloc[i]['userID']) + "," + str(df_user_location_list.iloc[i]['locationID']) + "," + str(
df_user_location_list.iloc[i]['latitude']) + "," + str(df_user_location_list.iloc[i]['longitude']) + "," + str(
df_user_location_list.iloc[i]['visit_times']) + "," + str((df_user_location_list.iloc[i]['spent_time']).total_seconds()) + ");"
print(sql)
cursor.execute(sql)
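# A parameterized form of the same INSERT (a sketch, assuming the column layout
# above; pymysql substitutes the %s placeholders itself) avoids building the SQL
# string by hand:
#   cursor.execute(
#       "INSERT INTO user_location_list "
#       "(userID,locationID,latitude,longitude,visit_times,spent_time) "
#       "VALUES (%s,%s,%s,%s,%s,%s)",
#       (int(df_user_location_list.iloc[i]['userID']),
#        int(df_user_location_list.iloc[i]['locationID']),
#        float(df_user_location_list.iloc[i]['latitude']),
#        float(df_user_location_list.iloc[i]['longitude']),
#        int(df_user_location_list.iloc[i]['visit_times']),
#        df_user_location_list.iloc[i]['spent_time'].total_seconds()))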
server.stop()
print("End") | gpl-3.0 |
rkmaddox/mne-python | examples/time_frequency/time_frequency_simulated.py | 18 | 8475 | """
======================================================================
Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
======================================================================
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
"""
# Authors: Hari Bharadwaj <[email protected]>
# Denis Engemann <[email protected]>
# Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
from mne.viz import centers_to_edges
print(__doc__)
###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = 1024 # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
epochs.average().plot()
###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
# which reduces variance. We'll also show some of the parameters that can be
# tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
# properties, and thus a different TFR. You can trade time resolution or
# frequency resolution or both in order to get a reduction in variance.
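# For orientation (standard multitaper relations as exposed by MNE, not computed
# by this script): the spectral smoothing is roughly ``time_bandwidth`` divided
# by the window length in seconds, and the number of DPSS tapers used is
# ``floor(time_bandwidth - 1)``, which is why the three settings below are
# annotated with 1, 3 and 7 tapers.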
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3. # Define our color limits.
###############################################################################
# **(1) Least smoothing (most variance/background fluctuations).**
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Least smoothing, most variance')
###############################################################################
# **(2) Less frequency smoothing, more time smoothing.**
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0 # Same frequency-smoothing as (1), 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less frequency smoothing, more time smoothing')
###############################################################################
# **(3) Less time smoothing, more frequency smoothing.**
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less time smoothing, more frequency smoothing')
##############################################################################
# Stockwell (S) transform
# =======================
#
# Stockwell uses a Gaussian window to balance temporal and spectral resolution.
# Importantly, frequency bands are phase-normalized, hence strictly comparable
# with regard to timing, and the input signal can be recovered from the
# transform in a lossless way if we disregard numerical errors. In this case,
# we control the spectral / temporal resolution by specifying different widths
# of the Gaussian window using the ``width`` parameter.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
colorbar=False)
ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
###############################################################################
# Morlet Wavelets
# ===============
#
# Finally, show the TFR using Morlet wavelets, which are sinusoids tapered by
# a Gaussian envelope. We can control the balance between spectral and
# temporal resolution with the ``n_cycles`` parameter, which defines the
# number of cycles to include in the window.
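# For reference (standard Morlet relations, not computed here): the temporal
# standard deviation of the wavelet is sigma_t = n_cycles / (2 * pi * freq) and
# its spectral counterpart is sigma_f = freq / n_cycles, so more cycles buys
# frequency resolution at the cost of temporal resolution.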
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
axes=ax, show=False, colorbar=False)
n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
###############################################################################
# Calculating a TFR without averaging over epochs
# -----------------------------------------------
#
# It is also possible to calculate a TFR without averaging across trials.
# We can do this by using ``average=False``. In this case, an instance of
# :class:`mne.time_frequency.EpochsTFR` is returned.
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Using Morlet wavelets and EpochsTFR', show=False)
###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array
# of shape ``(n_epochs, n_channels, n_freqs, n_times)`` (here, ``output='avg_power'``
# averages over epochs, so that first dimension is dropped).
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
freqs=freqs, n_cycles=n_cycles,
output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
x, y = centers_to_edges(epochs.times * 1000, freqs)
mesh = ax.pcolormesh(x, y, power[0], cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
| bsd-3-clause |
Eric89GXL/mne-python | mne/tests/test_cov.py | 6 | 33641 | # Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import itertools as itt
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events, create_info)
from mne.channels import equalize_channels
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.utils import (requires_sklearn, run_tests_if_main,
catch_logging, assert_snr)
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
"""Test properties of compute_whitener."""
raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
raw.pick_types(meg=True, eeg=True, exclude=())
if proj:
raw.apply_proj()
else:
raw.del_proj()
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw)
W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
verbose='error')
n_channels = len(raw.ch_names)
n_reduced = len(raw.ch_names)
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
# round-trip mults
round_trip = np.dot(W, C)
if pca is True:
assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
elif pca == 'white':
# Our first few rows/cols are zeroed out in the white space
assert_allclose(round_trip[-rank:, -rank:],
np.eye(rank), atol=1e-7)
else:
assert pca is False
assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
def test_cov_mismatch():
"""Test estimation with MEG<->Head mismatch."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = find_events(raw, stim_channel='STI 014')
raw.pick_channels(raw.ch_names[:5])
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
for kind in ('shift', 'None'):
epochs_2 = epochs.copy()
# This should be fine
compute_covariance([epochs, epochs_2])
if kind == 'shift':
epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
else: # None
epochs_2.info['dev_head_t'] = None
pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
compute_covariance([epochs, epochs_2], on_mismatch='ignore')
with pytest.raises(RuntimeWarning, match='transform mismatch'):
compute_covariance([epochs, epochs_2], on_mismatch='warn')
pytest.raises(ValueError, compute_covariance, epochs,
on_mismatch='x')
# This should work
epochs.info['dev_head_t'] = None
epochs_2.info['dev_head_t'] = None
compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
"""Test covariance ordering."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
info = raw.info
# add MEG channel with low enough index number to affect EEG if
# order is incorrect
info['bads'] += ['MEG 0113']
ch_names = [info['ch_names'][pick]
for pick in pick_types(info, meg=False, eeg=True)]
cov = read_cov(cov_fname)
# no avg ref present warning
prepare_noise_cov(cov, info, ch_names, verbose='error')
# big reordering
cov_reorder = cov.copy()
order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
cov_reorder['names'] = [cov['names'][ii] for ii in order]
cov_reorder['data'] = cov['data'][order][:, order]
# Make sure we did this properly
_assert_reorder(cov_reorder, cov, order)
# Now check some functions that should get the same result for both
# regularize
with pytest.raises(ValueError, match='rank, if str'):
regularize(cov, info, rank='foo')
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=False)
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=1.)
cov_reg = regularize(cov, info, rank='full')
cov_reg_reorder = regularize(cov_reorder, info, rank='full')
_assert_reorder(cov_reg_reorder, cov_reg, order)
# prepare_noise_cov
cov_prep = prepare_noise_cov(cov, info, ch_names)
cov_prep_reorder = prepare_noise_cov(cov, info, ch_names)
_assert_reorder(cov_prep, cov_prep_reorder,
order=np.arange(len(cov_prep['names'])))
# compute_whitener
whitener, w_ch_names, n_nzero = compute_whitener(
cov, info, return_rank=True)
assert whitener.shape[0] == whitener.shape[1]
whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
cov_reorder, info, return_rank=True)
assert_array_equal(w_ch_names_2, w_ch_names)
assert_allclose(whitener_2, whitener, rtol=1e-6)
assert n_nzero == n_nzero_2
# with pca
assert n_nzero < whitener.shape[0]
whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
cov, info, pca=True, return_rank=True)
assert_array_equal(w_ch_names_pca, w_ch_names)
assert n_nzero_pca == n_nzero
assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
# whiten_evoked
evoked = read_evokeds(ave_fname)[0]
evoked_white = whiten_evoked(evoked, cov)
evoked_white_2 = whiten_evoked(evoked, cov_reorder)
assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov(tmpdir):
"""Test ad hoc cov creation and I/O."""
out_fname = tmpdir.join('test-cov.fif')
evoked = read_evokeds(ave_fname)[0]
cov = make_ad_hoc_cov(evoked.info)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
cov = make_ad_hoc_cov(evoked.info, std)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
cov['diag'] = False
cov._get_square()
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
def test_io_cov(tmpdir):
"""Test IO for noise covariance matrices."""
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['loglik'] = -np.inf
cov.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(tmpdir.join('test-cov.fif'))
assert_array_almost_equal(cov.data, cov2.data)
assert_equal(cov['method'], cov2['method'])
assert_equal(cov['loglik'], cov2['loglik'])
assert 'Covariance' in repr(cov)
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
cov['bads'] = ['EEG 039']
cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
cov_sel.save(tmpdir.join('test-cov.fif'))
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmpdir.join('test-cov.fif.gz'))
cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
# test warnings on bad filenames
cov_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-cov.fif'):
write_cov(cov_badname, cov)
with pytest.warns(RuntimeWarning, match='-cov.fif'):
read_cov(cov_badname)
@pytest.mark.parametrize('method', (None, 'empirical', 'shrunk'))
def test_cov_estimation_on_raw(method, tmpdir):
"""Test estimation from raw (typically empty room)."""
if method == 'shrunk':
try:
import sklearn # noqa: F401
except Exception as exp:
pytest.skip('sklearn is required, got %s' % (exp,))
raw = read_raw_fif(raw_fname, preload=True)
cov_mne = read_cov(erm_cov_fname)
method_params = dict(shrunk=dict(shrinkage=[0]))
# A pure-string method uses the more efficient numpy-based code path, while a
# list gets triaged to compute_covariance (should be equivalent
# but use more memory)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(
raw, tstep=None, method=method, rank='full',
method_params=method_params)
assert_equal(cov.ch_names, cov_mne.ch_names)
assert_equal(cov.nfree, cov_mne.nfree)
assert_snr(cov.data, cov_mne.data, 1e6)
# test equivalence with np.cov
cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(), ddof=1)
if method != 'shrunk': # can check all
off_diag = np.triu_indices(cov_np.shape[0])
else:
# We explicitly zero out off-diag entries between channel types,
# so let's just check MEG off-diag entries
off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
exclude=())))
for other in (cov_mne, cov):
assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
assert_snr(cov.data, other.data, 1e6)
# tstep=0.2 (default)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(raw, method=method, rank='full',
method_params=method_params)
assert_equal(cov.nfree, cov_mne.nfree - 120) # cutoff some samples
assert_snr(cov.data, cov_mne.data, 170)
# test IO when computation done in Python
cov.save(tmpdir.join('test-cov.fif')) # test saving
cov_read = read_cov(tmpdir.join('test-cov.fif'))
assert cov_read.ch_names == cov.ch_names
assert cov_read.nfree == cov.nfree
assert_array_almost_equal(cov.data, cov_read.data)
# test with a subset of channels
raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
raw_pick.info.normalize_proj()
cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
rank='full', method_params=method_params)
assert cov_mne.ch_names[:5] == cov.ch_names
assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
cov = compute_raw_covariance(raw_pick, method=method, rank='full',
method_params=method_params)
assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps
# make sure we get a warning with too short a segment
raw_2 = read_raw_fif(raw_fname).crop(0, 1)
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw_2, method=method,
method_params=method_params)
# no epochs found due to rejection
pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
method='empirical', reject=dict(eog=200e-6))
# but this should work
with pytest.warns(None): # sklearn
cov = compute_raw_covariance(
raw.copy().crop(0, 10.), tstep=None, method=method,
reject=dict(eog=1000e-6), method_params=method_params,
verbose='error')
@pytest.mark.slowtest
@requires_sklearn
def test_cov_estimation_on_raw_reg():
"""Test estimation from raw with regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.info['sfreq'] /= 10.
raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed
cov_mne = read_cov(erm_cov_fname)
with pytest.warns(RuntimeWarning, match='Too few samples'):
# XXX don't use "shrunk" here, for some reason it makes Travis 2.7
# hang... "diagonal_fixed" is much faster. Use long epochs for speed.
cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert err < tol, '%s >= %s' % (err, tol)
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None))
def test_cov_estimation_with_triggers(rank, tmpdir):
"""Test estimation from raw with triggers."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True).load_data()
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True,
reject=reject, preload=True)
cov = compute_covariance(epochs, keep_sample_mean=True)
cov_km = read_cov(cov_km_fname)
# adjust for nfree bug
cov_km['nfree'] -= 1
_assert_cov(cov, cov_km)
# Test with tmin and tmax (different but not too much)
cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
assert np.all(cov.data != cov_tmin_tmax.data)
err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
linalg.norm(cov_tmin_tmax.data, ord='fro'))
assert err < 0.05
# cov using a list of epochs and keep_sample_mean=True
epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject)
for ev_id in event_ids]
cov2 = compute_covariance(epochs, keep_sample_mean=True)
assert_array_almost_equal(cov.data, cov2.data)
assert cov.ch_names == cov2.ch_names
# cov with keep_sample_mean=False using a list of epochs
cov = compute_covariance(epochs, keep_sample_mean=False)
assert cov_km.nfree == cov.nfree
_assert_cov(cov, read_cov(cov_fname), nfree=False)
method_params = {'empirical': {'assume_centered': False}}
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method_params=method_params)
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method='shrunk', rank=rank)
# test IO when computation done in Python
cov.save(tmpdir.join('test-cov.fif')) # test saving
cov_read = read_cov(tmpdir.join('test-cov.fif'))
_assert_cov(cov, cov_read, 1e-5)
# cov with list of epochs with different projectors
epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True),
Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=False)]
# these should fail
pytest.raises(ValueError, compute_covariance, epochs)
pytest.raises(ValueError, compute_covariance, epochs, projs=None)
# these should work, but won't be equal to above
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=[])
# test new dict support
epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
proj=True, reject=reject, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs, projs=[])
pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
"""Test arithmetic with noise covariance matrices."""
cov = read_cov(cov_fname)
cov_sum = cov + cov
assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
assert_array_almost_equal(2 * cov.data, cov_sum.data)
assert cov.ch_names == cov_sum.ch_names
cov += cov
assert_array_almost_equal(cov_sum.nfree, cov.nfree)
assert_array_almost_equal(cov_sum.data, cov.data)
assert cov_sum.ch_names == cov.ch_names
def test_regularize_cov():
"""Test cov regularization."""
raw = read_raw_fif(raw_fname)
raw.info['bads'].append(raw.ch_names[0]) # test with bad channels
noise_cov = read_cov(cov_fname)
# Regularize noise cov
reg_noise_cov = regularize(noise_cov, raw.info,
mag=0.1, grad=0.1, eeg=0.1, proj=True,
exclude='bads', rank='full')
assert noise_cov['dim'] == reg_noise_cov['dim']
assert noise_cov['data'].shape == reg_noise_cov['data'].shape
assert np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08
# make sure all args are represented
assert set(_DATA_CH_TYPES_SPLIT) - set(_get_args(regularize)) == set()
def test_whiten_evoked():
"""Test whitening of evoked data."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
cov = read_cov(cov_fname)
###########################################################################
# Show result
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
exclude='bads', rank='full')
evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
assert np.all(mean_baseline < 1.)
assert np.all(mean_baseline > 0.2)
# degenerate
cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])
pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)
def test_regularized_covariance():
"""Test unchanged data with regularized_covariance."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
data = evoked.data.copy()
# check that input data remain unchanged. gh-5698
_regularized_covariance(data)
assert_allclose(data, evoked.data, atol=1e-20)
@requires_sklearn
def test_auto_low_rank():
"""Test probabilistic low rank estimators."""
n_samples, n_features, rank = 400, 10, 5
sigma = 0.1
def get_data(n_samples, n_features, rank, sigma):
rng = np.random.RandomState(42)
W = rng.randn(n_features, n_features)
X = rng.randn(n_samples, rank)
U, _, _ = linalg.svd(W.copy())
X = np.dot(X, U[:, :rank].T)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X += rng.randn(n_samples, n_features) * sigmas
return X
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [4, 5, 6]}
cv = 3
n_jobs = 1
mode = 'factor_analysis'
rescale = 1e8
X *= rescale
est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params,
cv=cv)
assert_equal(info['best'], rank)
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [n_features + 5]}
msg = ('You are trying to estimate %i components on matrix '
'with %i features.') % (n_features + 5, n_features)
with pytest.warns(RuntimeWarning, match=msg):
_auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params, cv=cv)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None, 'info'))
@requires_sklearn
def test_compute_covariance_auto_reg(rank):
"""Test automated regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.resample(100, npad='auto') # much faster estimation
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(mag=4e-12)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
# we need a few channels for numerical reasons in PCA/FA
picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
raw.pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
epochs = Epochs(
raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
epochs = epochs.crop(None, 0)[:5]
method_params = dict(factor_analysis=dict(iter_n_components=[3]),
pca=dict(iter_n_components=[3]))
covs = compute_covariance(epochs, method='auto',
method_params=method_params,
return_estimators=True, rank=rank)
# make sure regularization produces structured differences
diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
off_diag_mask = np.invert(diag_mask)
for cov_a, cov_b in itt.combinations(covs, 2):
if (cov_a['method'] == 'diagonal_fixed' and
# here we have diagonal or no regularization.
cov_b['method'] == 'empirical' and rank == 'full'):
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
# but the rest is the same
assert_allclose(cov_a['data'][off_diag_mask],
cov_b['data'][off_diag_mask], rtol=1e-12)
else:
# and here we have shrinkage everywhere.
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
logliks = [c['loglik'] for c in covs]
assert np.diff(logliks).max() <= 0 # descending order
methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
if rank == 'full':
methods.extend(['factor_analysis', 'pca'])
with catch_logging() as log:
cov3 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=True, rank=rank,
verbose=True)
log = log.getvalue().split('\n')
if rank is None:
assert ' Setting small MAG eigenvalues to zero (without PCA)' in log
assert 'Reducing data rank from 10 -> 7' in log
else:
assert 'Reducing' not in log
method_names = [cov['method'] for cov in cov3]
best_bounds = [-45, -35]
bounds = [-55, -45] if rank == 'full' else best_bounds
for method in set(methods) - {'empirical', 'shrunk'}:
this_lik = cov3[method_names.index(method)]['loglik']
assert bounds[0] < this_lik < bounds[1]
this_lik = cov3[method_names.index('shrunk')]['loglik']
assert best_bounds[0] < this_lik < best_bounds[1]
this_lik = cov3[method_names.index('empirical')]['loglik']
bounds = [-110, -100] if rank == 'full' else best_bounds
assert bounds[0] < this_lik < bounds[1]
assert_equal({c['method'] for c in cov3}, set(methods))
cov4 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=False, rank=rank)
assert cov3[0]['method'] == cov4['method'] # ordering
# invalid prespecified method
pytest.raises(ValueError, compute_covariance, epochs, method='pizza')
# invalid scalings
pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
scalings=dict(misc=123))
def _cov_rank(cov, info, proj=True):
# ignore warnings about rank mismatches: sometimes we will intentionally
# violate the computed/info assumption, such as when using SSS with
# `rank='full'`
with pytest.warns(None):
return _compute_rank_int(cov, info=info, proj=proj)
@pytest.fixture(scope='module')
def raw_epochs_events():
"""Create raw, epochs, and events for tests."""
raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3)
raw = maxwell_filter(raw, regularize=None) # heavily reduce the rank
assert raw.info['bads'] == [] # no bads
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
return (raw, epochs, events)
@requires_sklearn
@pytest.mark.parametrize('rank', (None, 'full', 'info'))
def test_low_rank_methods(rank, raw_epochs_events):
"""Test low-rank covariance matrix estimation."""
epochs = raw_epochs_events[1]
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
methods = ('empirical', 'diagonal_fixed', 'oas')
bounds = {
'None': dict(empirical=(-15000, -5000),
diagonal_fixed=(-1500, -500),
oas=(-700, -600)),
'full': dict(empirical=(-18000, -8000),
diagonal_fixed=(-2000, -1600),
oas=(-1600, -1000)),
'info': dict(empirical=(-15000, -5000),
diagonal_fixed=(-700, -600),
oas=(-700, -600)),
}
with pytest.warns(RuntimeWarning, match='Too few samples'):
covs = compute_covariance(
epochs, method=methods, return_estimators=True, rank=rank,
verbose=True)
for cov in covs:
method = cov['method']
these_bounds = bounds[str(rank)][method]
this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))
if rank == 'full' and method != 'empirical':
assert this_rank == n_ch
else:
assert this_rank == sss_proj_rank
assert these_bounds[0] < cov['loglik'] < these_bounds[1], \
(rank, method)
@requires_sklearn
def test_low_rank_cov(raw_epochs_events):
"""Test additional properties of low rank computations."""
raw, epochs, events = raw_epochs_events
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
proj_rank = 365 # one EEG proj
with pytest.warns(RuntimeWarning, match='Too few samples'):
emp_cov = compute_covariance(epochs)
# Test equivalence with mne.cov.regularize subspace
with pytest.raises(ValueError, match='are dependent.*must equal'):
regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == proj_rank
with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
_compute_rank_int(reg_cov, info=epochs.info)
del reg_cov
with catch_logging() as log:
reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
verbose=True)
log = log.getvalue()
assert 'jointly' in log
assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
del reg_r_only_cov, reg_r_cov
# test that rank=306 is same as rank='full'
epochs_meg = epochs.copy().pick_types(meg=True)
assert len(epochs_meg.ch_names) == 306
epochs_meg.info.update(bads=[], projs=[])
cov_full = compute_covariance(epochs_meg, method='oas',
rank='full', verbose='error')
assert _cov_rank(cov_full, epochs_meg.info) == 306
with pytest.warns(RuntimeWarning, match='few samples'):
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306))
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306), verbose='error')
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
# Work with just EEG data to simplify projection / rank reduction
raw = raw.copy().pick_types(meg=False, eeg=True)
n_proj = 2
raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
n_ch = len(raw.ch_names)
rank = n_ch - n_proj - 1 # plus avg proj
assert len(raw.info['projs']) == 3
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
assert len(raw.ch_names) == n_ch
emp_cov = compute_covariance(epochs, rank='full', verbose='error')
assert _cov_rank(emp_cov, epochs.info) == rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == rank
reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_cov, epochs.info) == rank
dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
verbose='error')
assert _cov_rank(dia_cov, epochs.info) == rank
assert_allclose(dia_cov['data'], reg_cov['data'])
epochs.pick_channels(epochs.ch_names[:103])
# degenerate
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='pca')
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='factor_analysis')
@testing.requires_testing_data
@requires_sklearn
def test_cov_ctf():
"""Test basic cov computation on ctf data with/without compensation."""
raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
events = make_fixed_length_events(raw, 99999)
assert len(events) == 2
ch_names = [raw.info['ch_names'][pick]
for pick in pick_types(raw.info, meg=True, eeg=False,
ref_meg=False)]
for comp in [0, 1]:
raw.apply_gradient_compensation(comp)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0.,
method=['empirical'])
prepare_noise_cov(noise_cov, raw.info, ch_names)
raw.apply_gradient_compensation(0)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
raw.apply_gradient_compensation(1)
# TODO This next call in principle should fail.
prepare_noise_cov(noise_cov, raw.info, ch_names)
# make sure comps matrices were not removed from raw
assert raw.info['comps'], 'Comps matrices removed'
def test_equalize_channels():
"""Test equalization of channels for instances of Covariance."""
cov1 = make_ad_hoc_cov(create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.0,
ch_types='eeg'))
cov2 = make_ad_hoc_cov(create_info(['CH5', 'CH1', 'CH2'], sfreq=1.0,
ch_types='eeg'))
cov1, cov2 = equalize_channels([cov1, cov2])
assert cov1.ch_names == ['CH1', 'CH2']
assert cov2.ch_names == ['CH1', 'CH2']
run_tests_if_main()
| bsd-3-clause |
dfm/savefig | setup.py | 1 | 1426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit(0)
# Handle encoding
major, minor1, minor2, release, serial = sys.version_info
if major >= 3:
def rd(filename):
f = open(filename, encoding="utf-8")
r = f.read()
f.close()
return r
else:
def rd(filename):
f = open(filename)
r = f.read()
f.close()
return r
setup(
name="savefig",
version="0.0.3",
author="Daniel Foreman-Mackey",
author_email="[email protected]",
py_modules=["savefig"],
url="https://github.com/dfm/savefig",
license="MIT",
description="Save matplotlib figures with embedded metadata for "
"reproducibility and profit",
long_description=rd("README.rst"),
package_data={"": ["README.rst", "LICENSE"]},
include_package_data=True,
install_requires=[
'matplotlib',
'pypdf2',
'pillow',
],
classifiers=[
# "Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
)
| mit |
pnedunuri/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/tree/export.py | 35 | 16873 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# License: BSD 3 clause
import numpy as np
import warnings
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
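# For example, _color_brew(3) yields three RGB triples whose hues are spaced
# 120 degrees apart (h = 25, 145 and 265 in the loop above).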
class Sentinel(object):
def __repr__():
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default='tree.dot')
Handle or name of the output file. If ``None``, the result is
returned as a string. This will be the default from version 0.20.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
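    If ``out_file`` is ``None``, the dot source is returned as a string
    instead (see the `Returns` section above):
    >>> dot_data = tree.export_graphviz(clf, out_file=None)  # doctest: +SKIP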
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
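    # Worked example of the classification branch above: for class fractions
    # value = [0.9, 0.1], alpha = 255 * (0.9 - 0.1) / (1 - 0.1), about 227, so purer
    # nodes get more opaque fills; an even 50/50 split gives alpha = 0 (transparent).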
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
return_string = False
try:
if out_file == SENTINEL:
warnings.warn("out_file can be set to None starting from 0.18. "
"This will be the default in 0.20.",
DeprecationWarning)
out_file = "tree.dot"
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. The results,
in the form of a confusion matrix and a series of per-class metrics,
will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# copy the labels and mask the points to be treated as unlabeled
y_train = np.copy(y)
y_train[unlabeled_set] = -1
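# a label of -1 is the convention the semi-supervised estimators use for
# "unlabeled", so only the first n_labeled_points samples keep their digit labels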
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
pkruskal/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
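    # while one of N, D or k is varied below, the other two are held at these
    # fiducial values (they are also echoed in the plot annotations)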
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
vishwa91/OptSys | examples/reimage.py | 1 | 1356 | #!/usr/bin/env python3
import os, sys
sys.path.append('../modules')
import numpy as np
import matplotlib.pyplot as plt
import raytracing as rt
import visualize as vis
if __name__ == '__main__':
# Create simple system to reimage using lens
components = []
rays = []
image_plane = -120
    # The system contains just one lens
components.append(rt.Lens(f=100,
aperture=100,
pos=[0,0],
theta=0))
# Create three points and three rays from each point
rays.append([image_plane, 10, -np.pi/20])
rays.append([image_plane, 10, 0])
rays.append([image_plane, 10, np.pi/20])
rays.append([image_plane, 0, -np.pi/20])
rays.append([image_plane, 0, 0])
rays.append([image_plane, 0, np.pi/20])
rays.append([image_plane, -10, -np.pi/20])
rays.append([image_plane, -10, 0])
rays.append([image_plane, -10, np.pi/20])
colors = 'rrrgggbbb'
# Propagate the rays
ray_bundles = rt.propagate_rays(components, rays)
# Create a new canvas
canvas = vis.Canvas([-200, 600], [-100, 100])
# Draw the components
canvas.draw_components(components)
# Draw the rays
canvas.draw_rays(ray_bundles, colors)
# Show the system
canvas.show()
# Save a copy
canvas.save('reimage.png')
| mit |
RPGOne/scikit-learn | benchmarks/bench_plot_lasso_path.py | 84 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat tail in its singular value spectrum.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
jakobj/nest-simulator | pynest/examples/correlospinmatrix_detector_two_neuron.py | 12 | 2587 | # -*- coding: utf-8 -*-
#
# correlospinmatrix_detector_two_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Correlospinmatrix detector example
----------------------------------------
This scripts simulates two connected binary neurons, similar
as in [1]_. It measures and plots the auto- and cross covariance functions
of the individual neurons and between them, repsectively.
References
~~~~~~~~~~~~
.. [1] Ginzburg and Sompolinsky (1994). Theory of correlations in stochastic neural netoworks. 50(4) p. 3175. Fig. 1.
"""
import matplotlib.pyplot as plt
import nest
import numpy as np
m_x = 0.5
tau_m = 10.
h = 0.1
T = 1000000.
tau_max = 100.
csd = nest.Create("correlospinmatrix_detector")
csd.set(N_channels=2, tau_max=tau_max, Tstart=tau_max, delta_tau=h)
nest.SetDefaults('ginzburg_neuron', {'theta': 0.0, 'tau_m': tau_m,
'c_1': 0.0, 'c_2': 2. * m_x, 'c_3': 1.0})
n1 = nest.Create("ginzburg_neuron")
nest.SetDefaults("mcculloch_pitts_neuron", {'theta': 0.5, 'tau_m': tau_m})
n2 = nest.Create("mcculloch_pitts_neuron")
nest.Connect(n1, n2, syn_spec={"weight": 1.0})
nest.Connect(n1, csd, syn_spec={"receptor_type": 0})
nest.Connect(n2, csd, syn_spec={"receptor_type": 1})
nest.Simulate(T)
c = csd.get("count_covariance")
m = np.zeros(2, dtype=float)
for i in range(2):
m[i] = c[i][i][int(tau_max / h)] * (h / T)
print('mean activities =', m)
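# covariance estimate used below: cmat[i, j](tau) = <s_i(t) s_j(t + tau)> - m_i * m_j,
# where the factor h / T converts the accumulated detector counts into time averages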
cmat = np.zeros((2, 2, int(2 * tau_max / h) + 1), dtype=float)
for i in range(2):
for j in range(2):
cmat[i, j] = c[i][j] * (h / T) - m[i] * m[j]
ts = np.arange(-tau_max, tau_max + h, h)
plt.title("auto- and cross covariance functions")
plt.plot(ts, cmat[0, 1], 'r', label=r"$c_{12}$")
plt.plot(ts, cmat[1, 0], 'b', label=r"$c_{21}$")
plt.plot(ts, cmat[0, 0], 'g', label=r"$c_{11}$")
plt.plot(ts, cmat[1, 1], 'y', label=r"$c_{22}$")
plt.xlabel(r"time $t \; \mathrm{ms}$")
plt.ylabel(r"$c$")
plt.legend()
plt.show()
| gpl-2.0 |
manpen/hypergen | libs/NetworKit/version.py | 1 | 2272 | name='networkit'
version='4.1.1'
url='https://networkit.iti.kit.edu/'
download_url='https://pypi.python.org/pypi/networkit'
license='MIT'
author='Christian L. Staudt, Henning Meyerhenke'
author_email = '[email protected], [email protected]'
description = 'NetworKit is a toolbox for high-performance network analysis'
long_description = """
NetworKit is a growing open-source toolkit for high-performance network analysis.
Its aim is to provide tools for the analysis of large networks in the size range
from thousands to billions of edges. For this purpose, it implements efficient
graph algorithms, many of them parallel to utilize multicore architectures. These
are meant to compute standard measures of network analysis, such as degree
sequences, clustering coefficients and centrality. In
this respect, NetworKit is comparable to packages such as NetworkX, albeit with a
focus on parallelism and scalability. NetworKit is also a testbed for algorithm
engineering and contains a few novel algorithms from recently published
research, especially in the area of community detection."""
keywords = ['graph algorithm', 'network analysis', 'social network']
platforms = 'any'
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Other Environment',
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent', 'Programming Language :: C++',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
]
install_requires = []
# not used because it can mess up pip's installation process
# therefore, setup.py prints warnings for each package missing (tabulate, readline, scipy, numpy, matplotlib, networkx)
# or terminates the installation process (gcc, scons and cython)
| gpl-3.0 |
tectronics/watershed-priorities | util/scale_sensitivity_rand.py | 7 | 5775 | from django.core.management import setup_environ
import os
import sys
sys.path.append(os.path.dirname(os.path.join('..','wp',__file__)))
import settings
setup_environ(settings)
#==================================#
from arp.models import WatershedPrioritization, ConservationFeature, PlanningUnit, Cost, PuVsCf, PuVsCost
from django.contrib.auth.models import User
from django.utils import simplejson as json
from django.conf import settings
import time
import random
def mean(alist):
floatNums = [float(x) for x in alist]
return sum(floatNums) / len(alist)
user, created = User.objects.get_or_create(username='tester')
scalefactors = []
num_species = []
num_units = []
factors = [0.2, 0.5, 0.75, 1.0, 1.25, 2, 4, 8]
numspecies = ['all', 'all', 1, 2, 4, 8, 16, 32]
numcosts = [1,2,3]
# these are random
targets = [0.25, 0.5, 0.75]
penalties = [0.01, 0.25, 0.5, 0.75, 1.0]
settings.MARXAN_NUMREPS = 1
#MODE = 'hardcoded'
#MODE = 'query'
MODE = 'create'
if MODE == 'query':
wp = WatershedPrioritization.objects.filter(name__startswith="Auto Test Scale Factor")
for w in wp:
print "Querying", w.name, w
scalefactors.append(w.input_scalefactor)
r = w.results
num_species.append(r['num_met'])
num_units.append(r['num_units'])
w.kml
COUNT = 0
def create_wp(target_dict, penalties_dict, costs_dict, sf):
global COUNT
COUNT += 1
wp = WatershedPrioritization(input_targets = json.dumps(
target_dict
),
input_penalties = json.dumps(
penalties_dict
),
input_relativecosts=json.dumps(
costs_dict
),
input_scalefactor=sf,
name="Auto Test Scale Factor %s" % sf, user=user)
return wp
if MODE == 'create':
wp = WatershedPrioritization.objects.filter(name__startswith="Auto Test Scale Factor")
wp.delete()
cfs = ConservationFeature.objects.all()
keys = []
for c in cfs:
a = c.level_string
while a.endswith('---'):
a = a[:-3]
keys.append(a)
fh = open("./results_%s.csv" % time.time(), 'w+')
fh.write('ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, meantarget, nspeciesmet, nplanningunits')
fh.write('\n')
fh.flush()
for i in range(200000):
f = random.random() * 10.0
nc = random.choice([1,2,3])
if random.choice([True,False]):
numspecies = 'all'
else:
if random.choice([True,False]):
numspecies = random.randint(1,3)
else:
numspecies = random.randint(1,50)
try:
n = int(numspecies)
target_dict = {}
penalty_dict = {}
# pick n random species
selected_key = random.sample(keys, n) #'blah---blah'
if random.choice([True,False]):
t = random.choice(targets)
p = random.choice(penalties)
else:
t = None
p = None
for key in selected_key:
if t and p:
# Use the predetermined for ALL species
target_dict[key] = t
penalty_dict[key] = p
else:
# random for each species
target_dict[key] = random.choice(targets)
penalty_dict[key] = random.choice(penalties)
except ValueError:
# ALL species
t = random.choice(targets)
p = random.choice(penalties)
t2 = random.choice(targets)
p2 = random.choice(penalties)
target_dict = { "locally-endemic":t, "widespread":t2 }
penalty_dict = { "locally-endemic":p, "widespread":p2 }
costs_dict = { "watershed-condition":0, "invasives":0, "climate":0 }
for a in random.sample(costs_dict.keys(), nc):
costs_dict[a] = 1
sf = f
wp = create_wp(target_dict, penalty_dict, costs_dict, sf)
############
print "####################################"
print 'targets', wp.input_targets
print 'penalties', wp.input_penalties
print 'costs', wp.input_relativecosts
wp.save()
while not wp.done:
time.sleep(2)
print " ", wp.status_html
inpenalties = json.loads(wp.input_penalties)
intargets = json.loads(wp.input_targets)
if 'widespread' in inpenalties.keys():
nspecies = 71
else:
nspecies = len(inpenalties.keys())
r = wp.results
        #'ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, meantarget, nspeciesmet, nplanningunits'
fh.write(','.join([str(x) for x in [
sum(json.loads(wp.input_relativecosts).values()),
nspecies,
sum(inpenalties.values()),
mean(inpenalties.values()),
wp.input_scalefactor,
mean(intargets.values()),
r['num_met'],
r['num_units']
]]))
fh.write('\n')
fh.flush()
if MODE == 'hardcoded':
scalefactors = [0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.25, 1.5, 2, 4, 8, 16, 32]
num_units = [0, 3, 9, 17, 46, 57, 63, 73, 76, 79, 81, 82, 82, 83, 85, 90, 92, 93, 91]
num_species = [0, 1, 4, 10, 27, 38, 37, 54, 57, 58, 63, 59, 62, 66, 66, 69, 71, 71, 71]
assert len(scalefactors) == len(num_species) == len(num_units)
print scalefactors
print num_units
print num_species
#import matplotlib.pyplot as plt
#fig = plt.figure()
#plt.xlabel('Scale Factor')
#plt.ylabel('Number of Species Goals Met')
#ax = fig.add_subplot(111)
#ax.scatter(scalefactors, num_species)
#ax.set_xscale('log')
#plt.show()
| bsd-3-clause |
timothydmorton/isochrones | isochrones/observation.py | 1 | 39280 | from __future__ import print_function, division
import os, re, sys
from .config import on_rtd
from .logger import getLogger
if not on_rtd:
import numpy as np
import pandas as pd
from configobj import ConfigObj
from asciitree import LeftAligned, Traversal
from asciitree.drawing import BoxStyle, BOX_DOUBLE, BOX_BLANK
from collections import OrderedDict
from itertools import chain, count
try:
from itertools import imap, izip
except ImportError: # Python 3
imap = map
izip = zip
xrange = range
else:
class Traversal(object):
pass
class LeftAligned(object):
pass
from .isochrone import get_ichrone
from .utils import addmags, distance, fast_addmags
LOG_ONE_OVER_ROOT_2PI = np.log(1.0 / np.sqrt(2 * np.pi))
class NodeTraversal(Traversal):
"""
Custom subclass to traverse tree for ascii printing
"""
def __init__(self, pars=None, **kwargs):
self.pars = pars
super(NodeTraversal, self).__init__(**kwargs)
def get_children(self, node):
return node.children
def get_root(self, node):
return node
def get_text(self, node):
text = node.label
if self.pars is not None:
if hasattr(node, "model_mag"):
text += "; model={:.2f} ({})".format(node.model_mag(self.pars), node.lnlike(self.pars))
if type(node) == ModelNode:
root = node.get_root()
if hasattr(root, "spectroscopy"):
if node.label in root.spectroscopy:
for k, v in root.spectroscopy[node.label].items():
text += ", {}={}".format(k, v)
modval = node.evaluate(self.pars[node.label], k)
lnl = -0.5 * (modval - v[0]) ** 2 / v[1] ** 2
text += "; model={} ({})".format(modval, lnl)
if node.label in root.limits:
for k, v in root.limits[node.label].items():
text += ", {} limits={}".format(k, v)
if hasattr(root, "parallax"):
if node.index in root.parallax:
                        # Warning: this is not tested; may break ->
plx, u_plx = root.parallax[node.index]
text += ", parallax={}".format((plx, u_plx))
modval = node.evaluate(self.pars[node.label], "parallax")
lnl = -0.5 * (modval - plx) ** 2 / u_plx ** 2
text += "; model={} ({})".format(modval, lnl)
if hasattr(root, "AV"):
if node.index in root.AV:
                        # Warning: this is not tested; may break ->
AV, u_AV = root.AV[node.index]
text += ", AV={}".format((AV, u_AV))
modval = node.evaluate(self.pars[node.label], "AV")
                        lnl = -0.5 * (modval - AV) ** 2 / u_AV ** 2
text += "; model={} ({})".format(modval, lnl)
text += ": {}".format(self.pars[node.label])
else:
if type(node) == ModelNode:
root = node.get_root()
if hasattr(root, "spectroscopy"):
if node.label in root.spectroscopy:
for k, v in root.spectroscopy[node.label].items():
text += ", {}={}".format(k, v)
if node.index in root.parallax:
text += ", parallax={}".format(root.parallax[node.index])
if node.index in root.AV:
text += ", AV={}".format(root.AV[node.index])
if node.label in root.limits:
for k, v in root.limits[node.label].items():
text += ", {} limits={}".format(k, v)
# root = node.get_root()
# if hasattr(root,'spectroscopy'):
# if node.label in root.spectroscopy:
# for k,v in root.spectroscopy[node.label].items():
# model = node.evaluate(self.pars[node.label], k)
# text += '\n {}={} (model={})'.format(k,v,model)
return text
class MyLeftAligned(LeftAligned):
"""For custom ascii tree printing
"""
pars = None
def __init__(self, pars=None, **kwargs):
self.pars = pars
self.traverse = NodeTraversal(pars)
super(MyLeftAligned, self).__init__(**kwargs)
class Node(object):
def __init__(self, label):
self.label = label
self.parent = None
self.children = []
self._leaves = None
def __iter__(self):
"""
Iterate through tree, leaves first
following http://stackoverflow.com/questions/6914803/python-iterator-through-tree-with-list-of-children
"""
for node in chain(*imap(iter, self.children)):
yield node
yield self
def __getitem__(self, ind):
for n, i in izip(self, count()):
if i == ind:
return n
@property
def is_root(self):
return self.parent is None
def get_root(self):
if self.is_root:
return self
else:
return self.parent.get_root()
def get_ancestors(self):
if self.parent.is_root:
return []
else:
return [self.parent] + self.parent.get_ancestors()
def print_ascii(self, fout=None, pars=None):
box_tr = MyLeftAligned(pars, draw=BoxStyle(gfx=BOX_DOUBLE, horiz_len=1))
if fout is None:
print(box_tr(self))
else:
fout.write(box_tr(self))
@property
def is_leaf(self):
return len(self.children) == 0 and not self.is_root
def _clear_leaves(self):
self._leaves = None
def _clear_all_leaves(self):
if not self.is_root:
self.parent._clear_all_leaves()
self._clear_leaves()
def add_child(self, node):
node.parent = self
self.children.append(node)
self._clear_all_leaves()
def remove_children(self):
self.children = []
self._clear_all_leaves()
def remove_child(self, label):
"""
Removes node by label
"""
ind = None
for i, c in enumerate(self.children):
if c.label == label:
ind = i
if ind is None:
getLogger().warning("No child labeled {}.".format(label))
return
self.children.pop(ind)
self._clear_all_leaves()
def attach_to_parent(self, node):
# detach from current parent, if necessary
if self.parent is not None:
self.parent.remove_child(self.label)
node.children += [self]
self.parent = node
self._clear_all_leaves()
@property
def leaves(self):
if self._leaves is None:
self._leaves = self._get_leaves()
return self._leaves
def _get_leaves(self):
if self.is_leaf:
return [self]
else:
leaves = []
for c in self.children:
leaves += c._get_leaves()
return leaves
def select_leaves(self, name):
"""Returns all leaves under all nodes matching name
"""
if self.is_leaf:
return [self] if re.search(name, self.label) else []
else:
leaves = []
if re.search(name, self.label):
for c in self.children:
leaves += c._get_leaves() # all leaves
else:
for c in self.children:
leaves += c.select_leaves(name) # only matching ones
return leaves
@property
def leaf_labels(self):
return [l.label for l in self.leaves]
def get_leaf(self, label):
for l in self.leaves:
if label == l.label:
return l
def get_obs_nodes(self):
return [l for l in self if isinstance(l, ObsNode)]
@property
def obs_leaf_nodes(self):
return self.get_obs_leaves()
def get_obs_leaves(self):
"""Returns the last obs nodes that are leaves
"""
obs_leaves = []
for n in self:
if n.is_leaf:
if isinstance(n, ModelNode):
l = n.parent
else:
l = n
if l not in obs_leaves:
obs_leaves.append(l)
return obs_leaves
def get_model_nodes(self):
return [l for l in self._get_leaves() if isinstance(l, ModelNode)]
@property
def N_model_nodes(self):
return len(self.get_model_nodes())
def print_tree(self):
print(self.label)
def __str__(self):
return self.label
def __repr__(self):
if self.is_leaf:
s = "<{} '{}', parent='{}'>".format(self.__class__, self.label, self.parent)
else:
child_labels = [str(c) for c in self.children]
s = "<{} '{}', parent='{}', children={}>".format(
self.__class__, self.label, self.parent, child_labels
)
return s
class ObsNode(Node):
def __init__(self, observation, source, ref_node=None):
self.observation = observation
self.source = source
self.reference = ref_node
self.children = []
self.parent = None
self._leaves = None
# indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
# for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def instrument(self):
return self.observation.name
@property
def band(self):
return self.observation.band
@property
def value(self):
return (self.source.mag, self.source.e_mag)
@property
def resolution(self):
return self.observation.resolution
@property
def relative(self):
return self.source.relative
@property
def separation(self):
return self.source.separation
@property
def pa(self):
return self.source.pa
@property
def value_str(self):
return "({:.2f}, {:.2f})".format(*self.value)
def distance(self, other):
"""Coordinate distance from another ObsNode
"""
return distance((self.separation, self.pa), (other.separation, other.pa))
def _in_same_observation(self, other):
return self.instrument == other.instrument and self.band == other.band
@property
def n_params(self):
if self._n_params is None:
self._n_params = 5 * len(self.leaves)
return self._n_params
def _get_inds(self):
inds = [n.index for n in self.leaves]
inds = sorted(list(set(inds)))
return inds
def _clear_leaves(self):
self._leaves = None
self._inds = None
self._n_params = None
self._Nstars = None
@property
def Nstars(self):
"""
dictionary of number of stars per system
"""
if self._Nstars is None:
N = {}
for n in self.get_model_nodes():
if n.index not in N:
N[n.index] = 1
else:
N[n.index] += 1
self._Nstars = N
return self._Nstars
@property
def systems(self):
lst = sorted(self.Nstars.keys())
return lst
@property
def inds(self):
if self._inds is None:
self._inds = self._get_inds()
return self._inds
@property
def label(self):
if self.source.relative:
band_str = "delta-{}".format(self.band)
else:
band_str = self.band
return "{} {}={} @({:.2f}, {:.0f} [{:.2f}])".format(
self.instrument, band_str, self.value_str, self.separation, self.pa, self.resolution
)
@property
def obsname(self):
return "{}-{}".format(self.instrument, self.band)
def get_system(self, ind):
system = []
for l in self.get_root().leaves:
try:
if l.index == ind:
system.append(l)
except AttributeError:
pass
return system
def add_model(self, ic, N=1, index=0):
"""
Should only be able to do this to a leaf node.
Either N and index both integers OR index is
list of length=N
"""
if type(index) in [list, tuple]:
if len(index) != N:
raise ValueError("If a list, index must be of length N.")
else:
index = [index] * N
for idx in index:
existing = self.get_system(idx)
tag = len(existing)
self.add_child(ModelNode(ic, index=idx, tag=tag))
def model_mag(self, model_values, use_cache=True):
"""
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
# if pardict == self._cache_key and use_cache:
# #print('{}: using cached'.format(self))
# return self._cache_val
# #print('{}: calculating'.format(self))
# self._cache_key = pardict
return addmags(*[model_values[n.label][self.band] for n in self.leaves])
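    # addmags combines the leaf magnitudes in flux space,
    # m_tot = -2.5 * log10(sum_i 10**(-0.4 * m_i)),
    # so an unresolved blend of model stars is compared to the single observed source.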
def lnlike(self, model_values, use_cache=True):
"""
returns log-likelihood of this observation
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
mag, dmag = self.value
if np.isnan(dmag):
return 0
if self.relative:
# If this *is* the reference, just return
if self.reference is None:
return 0
mod = self.model_mag(model_values, use_cache=use_cache) - self.reference.model_mag(
model_values, use_cache=use_cache
)
mag -= self.reference.value[0]
else:
mod = self.model_mag(model_values, use_cache=use_cache)
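        # Gaussian log-likelihood of the observed magnitude given the model:
        # ln L = -0.5 * ((mag - mod) / dmag)**2 - ln(dmag) - 0.5 * ln(2 * pi)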
        lnl = -0.5 * (mag - mod) ** 2 / dmag ** 2 + LOG_ONE_OVER_ROOT_2PI - np.log(dmag)
# getLogger().debug('{} {}: mag={}, mod={}, lnlike={}'.format(self.instrument,
# self.band,
# mag,mod,lnl))
return lnl
class DummyObsNode(ObsNode):
def __init__(self, *args, **kwargs):
self.observation = None
self.source = None
self.reference = None
self.children = []
self.parent = None
self._leaves = None
# indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
# for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def label(self):
return "[dummy]"
@property
def value(self):
return None, None
def lnlike(self, *args, **kwargs):
return 0
class ModelNode(Node):
"""
These are always leaves; leaves are always these.
Index keeps track of which physical system node is in.
"""
def __init__(self, ic, index=0, tag=0):
self._ic = ic
self.index = index
self.tag = tag
self.children = []
self.parent = None
self._leaves = None
@property
def label(self):
return "{}_{}".format(self.index, self.tag)
@property
def ic(self):
if type(self._ic) == type:
self._ic = self._ic()
return self._ic
def get_obs_ancestors(self):
nodes = self.get_ancestors()
return [n for n in nodes if isinstance(n, ObsNode)]
@property
def contributing_observations(self):
"""The instrument-band for all the observations feeding into this model node
"""
return [n.obsname for n in self.get_obs_ancestors()]
def evaluate(self, p, prop):
if prop in self.ic.bands:
return self.evaluate_mag(p, prop)
elif prop == "mass":
return p[0]
elif prop == "age":
return p[1]
elif prop == "feh":
return p[2]
elif prop in ["Teff", "logg", "radius", "density"]:
return getattr(self.ic, prop)(*p[:3])
else:
raise ValueError("property {} cannot be evaluated by Isochrone.".format(prop))
def evaluate_mag(self, p, band):
return self.ic.mag[band](*p)
def lnlike(self, *args, **kwargs):
return 0
class Source(object):
def __init__(self, mag, e_mag, separation=0.0, pa=0.0, relative=False, is_reference=False):
self.mag = float(mag)
self.e_mag = float(e_mag)
self.separation = float(separation)
self.pa = float(pa)
self.relative = bool(relative)
self.is_reference = bool(is_reference)
def __str__(self):
return "({}, {}) @({}, {})".format(self.mag, self.e_mag, self.separation, self.pa)
def __repr__(self):
return self.__str__()
class Star(object):
"""Theoretical counterpart of Source.
"""
def __init__(self, pars, separation, pa):
self.pars = pars
self.separation = separation
self.pa = pa
def distance(self, other):
return distance((self.separation, self.pa), (other.separation, other.pa))
class Observation(object):
"""
Contains relevant information about imaging observation
name: identifying string (typically the instrument)
band: photometric bandpass
resolution: *approximate* angular resolution of instrument.
used for source matching between observations
sources: list of Source objects
"""
def __init__(self, name, band, resolution, sources=None, relative=False):
self.name = name
self.band = band
self.resolution = resolution
if sources is not None:
            if not all(type(s) == Source for s in sources):
raise ValueError("Source list must be all Source objects.")
self.sources = []
if sources is None:
sources = []
for s in sources:
self.add_source(s)
self.relative = relative
self._set_reference()
def observe(self, stars, unc, ic=None):
"""Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now)
"""
if ic is None:
ic = get_ichrone("mist")
if len(stars) > 2:
raise NotImplementedError("No support yet for > 2 synthetic stars")
mags = [ic(*s.pars)["{}_mag".format(self.band)].values[0] for s in stars]
d = stars[0].distance(stars[1])
if d < self.resolution:
mag = addmags(*mags) + unc * np.random.randn()
sources = [Source(mag, unc, stars[0].separation, stars[0].pa, relative=self.relative)]
else:
mags = np.array([m + unc * np.random.randn() for m in mags])
if self.relative:
mags -= mags.min()
sources = [
Source(m, unc, s.separation, s.pa, relative=self.relative) for m, s in zip(mags, stars)
]
for s in sources:
self.add_source(s)
self._set_reference()
def add_source(self, source):
"""
Adds source to observation, keeping sorted order (in separation)
"""
if not type(source) == Source:
raise TypeError("Can only add Source object.")
if len(self.sources) == 0:
self.sources.append(source)
else:
ind = 0
for s in self.sources:
# Keep sorted order of separation
if source.separation < s.separation:
break
ind += 1
self.sources.insert(ind, source)
# self._set_reference()
@property
def brightest(self):
mag0 = np.inf
s0 = None
for s in self.sources:
if s.mag < mag0:
mag0 = s.mag
s0 = s
return s0
def _set_reference(self):
"""If relative, make sure reference node is set to brightest.
"""
if len(self.sources) > 0:
self.brightest.is_reference = True
def __str__(self):
return "{}-{}".format(self.name, self.band)
def __repr__(self):
return str(self)
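# A minimal sketch of building an Observation by hand (all numbers are made up
# for illustration): two sources from a hypothetical 'survey' K-band image, the
# fainter one offset from the brightest:
#     obs = Observation('survey', 'K', resolution=0.1,
#                       sources=[Source(10.0, 0.05),
#                                Source(12.0, 0.05, separation=0.5, pa=100.0)])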
class ObservationTree(Node):
"""Builds a tree of Nodes from a list of Observation objects
Organizes Observations from smallest to largest resolution,
and at each stage attaches each source to the most probable
match from the previous Observation. Admittedly somewhat hack-y,
but should *usually* do the right thing. Check out `obs.print_ascii()`
to visualize what this has done.
"""
spec_props = ["Teff", "logg", "feh", "density"]
def __init__(self, observations=None, name=None):
if observations is None:
observations = []
if name is None:
self.label = "root"
else:
self.label = name
self.parent = None
self._observations = []
self._build_tree()
[self.add_observation(obs) for obs in observations]
self._N = None
self._index = None
# Spectroscopic properties
self.spectroscopy = {}
# Limits (such as minimum on logg)
self.limits = {}
# Parallax measurements
self.parallax = {}
# AV priors
self.AV = {}
# This will be calculated and set at first access
self._Nstars = None
# likelihood cache
self._cache_key = None
self._cache_val = None
@property
def name(self):
return self.label
def _clear_cache(self):
self._cache_key = None
self._cache_val = None
@classmethod
def from_df(cls, df, **kwargs):
"""
DataFrame must have the right columns.
        these are: name, band, resolution, mag, e_mag, separation, pa, relative
"""
tree = cls(**kwargs)
for (n, b), g in df.groupby(["name", "band"]):
# g.sort('separation', inplace=True) #ensures that the first is reference
sources = [
Source(**s[["mag", "e_mag", "separation", "pa", "relative"]]) for _, s in g.iterrows()
]
obs = Observation(n, b, g.resolution.mean(), sources=sources, relative=g.relative.any())
tree.add_observation(obs)
# For all relative mags, set reference to be brightest
return tree
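    # Sketch of the expected table (hypothetical values), one row per source:
    #     df = pd.DataFrame({'name': ['survey', 'survey'], 'band': ['K', 'K'],
    #                        'resolution': [0.1, 0.1], 'mag': [10.0, 12.0],
    #                        'e_mag': [0.05, 0.05], 'separation': [0.0, 0.5],
    #                        'pa': [0.0, 100.0], 'relative': [False, False]})
    #     tree = ObservationTree.from_df(df)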
@classmethod
def from_ini(cls, filename):
config = ConfigObj(filename)
def to_df(self):
"""
Returns DataFrame with photometry from observations organized.
This DataFrame should be able to be read back in to
reconstruct the observation.
"""
df = pd.DataFrame()
name = []
band = []
resolution = []
mag = []
e_mag = []
separation = []
pa = []
relative = []
for o in self._observations:
for s in o.sources:
name.append(o.name)
band.append(o.band)
resolution.append(o.resolution)
mag.append(s.mag)
e_mag.append(s.e_mag)
separation.append(s.separation)
pa.append(s.pa)
relative.append(s.relative)
return pd.DataFrame(
{
"name": name,
"band": band,
"resolution": resolution,
"mag": mag,
"e_mag": e_mag,
"separation": separation,
"pa": pa,
"relative": relative,
}
)
def save_hdf(self, filename, path="", overwrite=False, append=False):
"""
Writes all info necessary to recreate object to HDF file
Saves table of photometry in DataFrame
Saves model specification, spectroscopy, parallax to attrs
"""
if os.path.exists(filename):
store = pd.HDFStore(filename)
if path in store:
store.close()
if overwrite:
os.remove(filename)
elif not append:
raise IOError(
"{} in {} exists. Set either overwrite or append option.".format(path, filename)
)
else:
store.close()
df = self.to_df()
df.to_hdf(filename, path + "/df", format="table")
with pd.HDFStore(filename) as store:
# store = pd.HDFStore(filename)
attrs = store.get_storer(path + "/df").attrs
attrs.spectroscopy = self.spectroscopy
attrs.parallax = self.parallax
attrs.AV = self.AV
attrs.N = self._N
attrs.index = self._index
store.close()
@classmethod
def load_hdf(cls, filename, path="", ic=None):
"""
Loads stored ObservationTree from file.
You can provide the isochrone to use; or it will default to MIST
TODO: saving and loading must be fixed! save ic type, bands, etc.
"""
store = pd.HDFStore(filename)
try:
samples = store[path + "/df"]
attrs = store.get_storer(path + "/df").attrs
except:
store.close()
raise
df = store[path + "/df"]
new = cls.from_df(df)
if ic is None:
ic = get_ichrone("mist")
new.define_models(ic, N=attrs.N, index=attrs.index)
new.spectroscopy = attrs.spectroscopy
new.parallax = attrs.parallax
new.AV = attrs.AV
store.close()
return new
def add_observation(self, obs):
"""Adds an observation to observation list, keeping proper order
"""
if len(self._observations) == 0:
self._observations.append(obs)
else:
res = obs.resolution
ind = 0
for o in self._observations:
if res > o.resolution:
break
ind += 1
self._observations.insert(ind, obs)
self._build_tree()
self._clear_cache()
def add_spectroscopy(self, label="0_0", **props):
"""
Adds spectroscopic measurement to particular star(s) (corresponding to individual model node)
Default 0_0 should be primary star
legal inputs are 'Teff', 'logg', 'feh', and in form (val, err)
"""
if label not in self.leaf_labels:
raise ValueError(
"No model node named {} (must be in {}). Maybe define models first?".format(
label, self.leaf_labels
)
)
for k, v in props.items():
if k not in self.spec_props:
raise ValueError("Illegal property {} (only {} allowed).".format(k, self.spec_props))
if len(v) != 2:
raise ValueError("Must provide (value, uncertainty) for {}.".format(k))
if label not in self.spectroscopy:
self.spectroscopy[label] = {}
for k, v in props.items():
self.spectroscopy[label][k] = v
self._clear_cache()
def add_limit(self, label="0_0", **props):
"""Define limits to spectroscopic property of particular stars.
Usually will be used for 'logg', but 'Teff' and 'feh' will also work.
In form (min, max): e.g., t.add_limit(logg=(3.0,None))
None will be converted to (-)np.inf
"""
if label not in self.leaf_labels:
raise ValueError(
"No model node named {} (must be in {}). Maybe define models first?".format(
label, self.leaf_labels
)
)
for k, v in props.items():
if k not in self.spec_props:
raise ValueError("Illegal property {} (only {} allowed).".format(k, self.spec_props))
if len(v) != 2:
raise ValueError("Must provide (min, max) for {}. (`None` is allowed value)".format(k))
if label not in self.limits:
self.limits[label] = {}
for k, v in props.items():
vmin, vmax = v
if vmin is None:
vmin = -np.inf
if vmax is None:
vmax = np.inf
self.limits[label][k] = (vmin, vmax)
self._clear_cache()
def add_parallax(self, plax, system=0):
if len(plax) != 2:
raise ValueError("Must enter (value,uncertainty).")
if system not in self.systems:
raise ValueError("{} not in systems ({}).".format(system, self.systems))
self.parallax[system] = plax
self._clear_cache()
def add_AV(self, AV, system=0):
if len(AV) != 2:
raise ValueError("Must enter (value,uncertainty).")
if system not in self.systems:
raise ValueError("{} not in systems ({}).".format(system, self.systems))
self.AV[system] = AV
self._clear_cache()
def define_models(self, ic, leaves=None, N=1, index=0):
"""
N, index are either integers or lists of integers.
N : number of model stars per observed star
index : index of physical association
leaves: either a list of leaves, or a pattern by which
the leaves are selected (via `select_leaves`)
If these are lists, then they are defined individually for
each leaf.
If `index` is a list, then each entry must be either
an integer or a list of length `N` (where `N` is the corresponding
entry in the `N` list.)
This bugs up if you call it multiple times. If you want
to re-do a call to this function, please re-define the tree.
"""
self.clear_models()
if leaves is None:
leaves = self._get_leaves()
elif type(leaves) == type(""):
leaves = self.select_leaves(leaves)
# Sort leaves by distance, to ensure system 0 will be assigned
# to the main reference star.
if np.isscalar(N):
N = np.ones(len(leaves)) * N
# if np.size(index) > 1:
# index = [index]
N = np.array(N).astype(int)
if np.isscalar(index):
index = np.ones_like(N) * index
index = np.array(index).astype(int)
# Add the appropriate number of model nodes to each
# star in the highest-resoluion image
for s, n, i in zip(leaves, N, index):
# Remove any previous model nodes (should do some checks here?)
s.remove_children()
s.add_model(ic, n, i)
# For each system, make sure tag _0 is the brightest.
self._fix_labels()
self._N = N
self._index = index
self._clear_all_leaves()
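    # For example (hypothetical call): define_models(ic, N=2, index=0) attaches two
    # model stars to every leaf, all in physical system 0 (an unresolved binary),
    # while N=[2, 1] with index=[0, 1] makes the first leaf a binary in system 0 and
    # the second leaf a single star in a separate system 1.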
def _fix_labels(self):
"""For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image
"""
for s in self.systems:
mag0 = np.inf
n0 = None
for n in self.get_system(s):
if isinstance(n.parent, DummyObsNode):
continue
mag, _ = n.parent.value
if mag < mag0:
mag0 = mag
n0 = n
# If brightest is not tag _0, then switch them.
if n0 is not None and n0.tag != 0:
n_other = self.get_leaf("{}_{}".format(s, 0))
n_other.tag = n0.tag
n0.tag = 0
def get_system(self, ind):
system = []
for l in self.leaves:
try:
if l.index == ind:
system.append(l)
except AttributeError:
pass
return system
@property
def observations(self):
return self._observations
def select_observations(self, name):
"""Returns nodes whose instrument-band matches 'name'
"""
return [n for n in self.get_obs_nodes() if n.obsname == name]
def clear_models(self):
for n in self:
if isinstance(n, ModelNode):
n.parent.remove_child(n.label)
self._clear_all_leaves()
def trim(self):
"""
Trims leaves from tree that are not observed at highest-resolution level
        This is a bit hacky; note that the body below is disabled by the early
        ``return``, so calling this is currently a no-op.
"""
# Only allow leaves to stay on list (highest-resolution) level
return
for l in self._levels[-2::-1]:
for n in l:
if n.is_leaf:
n.parent.remove_child(n.label)
self._clear_all_leaves() # clears cached list of leaves
def p2pardict(self, p):
"""
Given leaf labels, turns parameter vector into pardict
"""
d = {}
N = self.Nstars
i = 0
for s in self.systems:
age, feh, dist, AV = p[i + N[s] : i + N[s] + 4]
for j in xrange(N[s]):
l = "{}_{}".format(s, j)
mass = p[i + j]
d[l] = [mass, age, feh, dist, AV]
i += N[s] + 4
return d
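    # Layout sketch: for a single system 0 with two stars, p is
    # [mass_0_0, mass_0_1, age_0, feh_0, distance_0, AV_0] and the returned dict is
    # {'0_0': [mass_0_0, age_0, feh_0, distance_0, AV_0],
    #  '0_1': [mass_0_1, age_0, feh_0, distance_0, AV_0]}.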
def pardict2p(self, pardict):
"""Convert from dictionary back to flat parameter vector
"""
pars = []
N = self.Nstars
for s in self.systems:
for i in range(N[s]):
star = "{}_{}".format(s, i)
pars.append(pardict[star][0])
pars += pardict["{}_0".format(s)][1:]
return pars
@property
def param_description(self):
N = self.Nstars
pars = []
for s in self.systems:
for j in xrange(N[s]):
pars.append("eep_{}_{}".format(s, j))
for p in ["age", "feh", "distance", "AV"]:
pars.append("{}_{}".format(p, s))
return pars
@property
def Nstars(self):
if self._Nstars is None:
N = {}
for n in self.get_model_nodes():
if n.index not in N:
N[n.index] = 1
else:
N[n.index] += 1
self._Nstars = N
return self._Nstars
@property
def systems(self):
        # make sure the result is unique (the set() below enforces this)
lst = list(chain(*[c.systems for c in self.children]))
return sorted(set(lst))
def print_ascii(self, fout=None, p=None):
pardict = None
if p is not None:
pardict = self.p2pardict(p)
super(ObservationTree, self).print_ascii(fout, pardict)
def lnlike(self, p, model_values, use_cache=True):
"""
takes parameter vector, constructs pardict, returns sum of lnlikes of non-leaf nodes
"""
pardict = self.p2pardict(p) if type(p) is not dict else p
# TODO: do we still want caching?
# if use_cache and self._cache_key is not None and np.all(p==self._cache_key):
# return self._cache_val
# self._cache_key = p
# lnlike from photometry
lnl = 0
for n in self:
if n is not self:
lnl += n.lnlike(model_values, use_cache=use_cache)
if not np.isfinite(lnl):
self._cache_val = -np.inf
return -np.inf
# lnlike from spectroscopy
for l in self.spectroscopy:
for prop, (val, err) in self.spectroscopy[l].items():
mod = model_values[l][prop]
lnl += -0.5 * (val - mod) ** 2 / err ** 2 + LOG_ONE_OVER_ROOT_2PI + np.log(err)
if not np.isfinite(lnl):
self._cache_val = -np.inf
return -np.inf
# enforce limits
for l in self.limits:
for prop, (vmin, vmax) in self.limits[l].items():
mod = model_values[l][prop]
if mod < vmin or mod > vmax or not np.isfinite(mod):
self._cache_val = -np.inf
return -np.inf
# lnlike from parallax
for s, (val, err) in self.parallax.items():
dist = pardict["{}_0".format(s)][3]
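# Model parallax in milliarcsec, assuming `dist` is in parsecs (plx [mas] = 1000 / d [pc])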
mod = 1.0 / dist * 1000.0
lnl += -0.5 * (val - mod) ** 2 / err ** 2 + LOG_ONE_OVER_ROOT_2PI + np.log(err)
# lnlike from AV
for s, (val, err) in self.AV.items():
AV = pardict["{}_0".format(s)][4]
lnl += -0.5 * (val - AV) ** 2 / err ** 2 + LOG_ONE_OVER_ROOT_2PI + np.log(err)
if not np.isfinite(lnl):
self._cache_val = -np.inf
return -np.inf
self._cache_val = lnl
return lnl
def _find_closest(self, n0):
"""returns the node in the tree that is closest to n0, but not
in the same observation
"""
dmin = np.inf
nclose = None
ds = []
nodes = []
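# Seed the candidate list with the root node at infinite distance, so there
# is always a fallback parent if no observed node qualifies.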
ds.append(np.inf)
nodes.append(self)
for n in self:
if n is n0:
continue
try:
if n._in_same_observation(n0):
continue
ds.append(n.distance(n0))
nodes.append(n)
except AttributeError:
pass
inds = np.argsort(ds)
ds = [ds[i] for i in inds]
nodes = [nodes[i] for i in inds]
for d, n in zip(ds, nodes):
try:
if d < n.resolution or n.resolution == -1:
return n
except AttributeError:
pass
# If nothing else works
return self
def _build_tree(self):
# reset leaf cache, children
self._clear_all_leaves()
self.children = []
for i, o in enumerate(self._observations):
s0 = o.brightest
ref_node = ObsNode(o, s0)
for s in o.sources:
if s.relative and not s.is_reference:
node = ObsNode(o, s, ref_node=ref_node)
elif s.relative and s.is_reference:
node = ref_node
else:
node = ObsNode(o, s)
# For first level, no need to choose parent
if i == 0:
parent = self
else:
# Find parent (closest node in tree)
parent = self._find_closest(node)
parent.add_child(node)
# If after all this, there are no `ObsNode` nodes,
# then add a dummy.
if len(self.get_obs_nodes()) == 0:
self.add_child(DummyObsNode())
@classmethod
def synthetic(cls, stars, surveys):
pass
| mit |
dsquareindia/scikit-learn | sklearn/cross_validation.py | 6 | 72259 |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .utils.random import choice
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneOut` instead.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePOut` instead.
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations, which grows combinatorially with the
number of samples, this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.KFold` instead.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
fold(s) form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
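# e.g. n=10, n_folds=3 -> fold_sizes = [4, 3, 3]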
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupKFold` instead.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
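# (illustrative: label sizes [5, 3, 2, 2] with n_folds=2 yield folds of size 7 and 5)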
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedKFold` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one picks up the
remaining samples.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneGroupOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePGroupsOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ShuffleSplit` instead.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
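# Resolves test_size / train_size into integer counts (n_train, n_test).
# Floats are treated as proportions (test rounded up, train rounded down),
# ints as absolute counts; e.g. n=10, test_size=0.25, train_size=None -> (7, 3).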
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedShuffleSplit` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
for n in range(self.n_iter):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(cls_count, self.n_train, rng)
class_counts_remaining = cls_count - n_i
t_i = _approximate_mode(class_counts_remaining, self.n_test, rng)
train = []
test = []
for i, _ in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
perm_indices_class_i = np.where(
(i == self.y_indices))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.PredefinedSplit` instead.
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupShuffleSplit` instead.
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_predict` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
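# Invert the concatenation order of the test indices so that predictions
# are returned in the original sample order.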
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
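# Permute indices only within each label group, so samples are shuffled
# among others carrying the same label.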
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return safe_indexing(y, ind)
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.check_cv` instead.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.permutation_test_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
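def _example_permutation_test_score():  # pragma: no cover
    # Illustrative sketch, not part of the original module: a typical call to
    # ``permutation_test_score`` on an assumed small dataset, using the
    # deprecated API documented above.
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, permutation_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=100, random_state=0)
    # A small p-value suggests the unpermuted score is unlikely to arise from
    # label-shuffled data alone.
    return score, permutation_scores, pvalue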
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.train_test_split` instead.
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
funbaker/astropy | docs/conf.py | 1 | 9728 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_docs" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
from datetime import datetime
import os
ON_RTD = os.environ.get('READTHEDOCS') == 'True'
ON_TRAVIS = os.environ.get('TRAVIS') == 'true'
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
import os
import sys
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# If that doesn't work trying to import from astropy_helpers below will
# still blow up
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
import astropy
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.2.1")
# The intersphinx_mapping in astropy_helpers.sphinx.conf refers to astropy for
# the benefit of affiliated packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']
# add any custom intersphinx for astropy
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/latest/', None)
intersphinx_mapping['ipython'] = ('http://ipython.readthedocs.io/en/stable/', None)
intersphinx_mapping['pandas'] = ('http://pandas.pydata.org/pandas-docs/stable/', None)
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('_pkgtemplate.rst')
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
.. |minimum_numpy_version| replace:: {0.__minimum_numpy_version__}
.. Astropy
.. _Astropy: http://astropy.org
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
""".format(astropy)
# -- Project information ------------------------------------------------------
project = u'Astropy'
author = u'The Astropy Developers'
copyright = u'2011–{0}, '.format(datetime.utcnow().year) + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = astropy.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = astropy.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
#html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ----------------------------------------
extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
# Don't import the module as "version" or it will override the
# "version" configuration parameter
from astropy import version as versionmod
edit_on_github_project = "astropy/astropy"
if versionmod.release:
edit_on_github_branch = "v{0}.{1}.x".format(
versionmod.major, versionmod.minor)
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
edit_on_github_skip_regex = '_.*|api/.*'
github_issues_url = 'https://github.com/astropy/astropy/issues/'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
nitpick_ignore = []
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
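# (For reference, the parsing above assumes each line of ``nitpick-exceptions``
# holds a reference type and a target separated by whitespace, e.g.
# ``py:class numpy.ndarray``; blank lines and ``#`` comments are skipped.)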
# -- Options for the Sphinx gallery -------------------------------------------
try:
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'http://matplotlib.org/',
'numpy': 'http://docs.scipy.org/doc/numpy/',
},
'abort_on_example_error': True
}
except ImportError:
def setup(app):
app.warn('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
linkcheck_anchors = False
| bsd-3-clause |
shareactorIO/pipeline | source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/SkFlow_DEPRECATED/iris_val_based_early_stopping.py | 5 | 2135 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.python.platform import googletest
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
import skflow
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
test_size=0.2, random_state=42)
val_monitor = skflow.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=200,
n_classes=3)
# classifier with early stopping on training data
classifier1 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier1.fit(X_train, y_train)
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping on validation data
classifier2 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier2.fit(X_train, y_train, val_monitor)
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
# in many applications, the score is improved by using early stopping on val data
print(score2 > score1)
| apache-2.0 |
arg-hya/taxiCab | Tools/Misc/ConvertLatLong_PresentTrip.py | 1 | 1075 | import numpy as np
import pycrs
import mpl_toolkits.basemap.pyproj as pyproj # Import the pyproj module
import shapefile as shp
import matplotlib.pyplot as plt
projobj = pycrs.loader.from_file(r'D:\TaxiCab\mycode\python\taxi_zones\taxi_zones.prj')
proj4string = projobj.to_proj4()
print(proj4string)
isn2004=pyproj.Proj(proj4string, preserve_units=True)
wgs84=pyproj.Proj("+init=EPSG:4326")
if __name__ == "__main__":
target = open("trip_data_1_present_trip_end_location_converted.txt",'w')
target.write(r"converted_longitude,converted_latitude")
target.write("\n")
with open("trip_data_1_present_trip_end_location.txt") as f:
next(f)
for line in f:
strings = line.split(",")
co1 = float(strings[0])
co2 = float(strings[1])
x2,y2 = pyproj.transform(wgs84,isn2004 ,co1,co2)
target.write(str(x2))
target.write(",")
target.write(str(y2))
target.write("\n")
target.close()
| gpl-3.0 |
k-nish/number_recognizer | tests/test_machine_loader.py | 3 | 1450 | import os
import unittest
from sklearn import datasets
from machines.machine_loader import MachineLoader
import machines.number_recognizer
class TestMachineLoader(unittest.TestCase):
def test_load(self):
machine = MachineLoader.load(machines.number_recognizer)
self.assertTrue(machine)
def test_feedback(self):
test_file = "test_feedback.txt"
feedback_file = MachineLoader.feedback(machines.number_recognizer, None, file_name=test_file)
if os.path.isfile(feedback_file):
os.remove(feedback_file)
data = [0] * 64
target = [0]
feedback = target + data
# create file
MachineLoader.feedback(machines.number_recognizer, feedback, file_name=test_file)
# append file
MachineLoader.feedback(machines.number_recognizer, feedback, file_name=test_file)
with open(feedback_file, mode="rb") as r:
lines = r.readlines()
self.assertEqual(2, len(lines))
os.remove(feedback_file)
def test_predict(self):
digits = datasets.load_digits()
from sklearn import svm
from sklearn import cross_validation
clf = svm.SVC(gamma=0.001, C=100)
clf = clf.fit(digits.data, digits.target)
cross_validation.cross_val_score(clf, digits.data[:-1], digits.target[:-1], cv=5)
predicted = clf.predict(digits.data[-1])
        self.assertGreaterEqual(predicted, 0)
| mit |
qifeigit/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
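# Note: this configuration is normally consumed by scikit-learn's top-level
# numpy.distutils-based setup; building only this subpackage in place would
# typically look like ``python setup.py build_ext --inplace`` (illustrative).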
| bsd-3-clause |
CUBoulder-HPCPerfAnalysis/memory | stream-analyze-time.py | 1 | 1201 | import pandas
import glob
from matplotlib import pyplot
def normalize_columns(s):
"""Fix column names"""
s = s.strip(' #')
try:
return {'Machinename':'machinename', 'Username':'username'}[s]
except:
return s
df = pandas.concat(pandas.read_csv(f, na_values='?').rename(columns=normalize_columns)
for f in glob.glob('results/fdkong-time.csv'))
print(df)
columns = ['Triad s','Add s','Dot s','Scale s', 'Copy s']
styles = ['bs-','ro-','y^-','bo-','d:']
linewidths = [1, 1, 1, 1,1,1]
fig, ax = pyplot.subplots()
ax.set_ylabel('Time(s)')
ax.set_xlabel('Block Size')
# ax.legend(lines, columns, loc='upper center')
for col, sty, lw in zip(columns, styles, linewidths):
df.plot(x='Block Size', y=col, style=sty, lw=lw, ax=ax,title='Compute Time VS Block Size')
pyplot.legend(columns,loc='best')
pyplot.show()
# Does performance depend on block size?
# df.plot(x='Block Size', y='Triad s', kind='scatter')
# df.plot(x='Block Size', y='Dot s', kind='scatter')
# pyplot.show()
# Does performance depend on the number of threads used?
# df[df['machinename'].isin(['es.mcs.anl.gov'])].plot('CPU Cores used', 'Triad MB/s', kind='scatter')
# pyplot.show()
| mit |
Astroua/TurbuStat | turbustat/statistics/tsallis/tsallis.py | 2 | 15834 | # Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
from scipy.stats import chisquare
from scipy.optimize import curve_fit
import astropy.units as u
from astropy.table import Table
from ..stats_utils import standardize, padwithzeros
from ..base_statistic import BaseStatisticMixIn
from ...io import common_types, twod_types
class Tsallis(BaseStatisticMixIn):
"""
The Tsallis Distribution (see Tofflemire et al., 2011)
Parameters
----------
img : %(dtypes)s
2D image.
header : FITS header, optional
The image header. Needed for the pixel scale.
lags : `~astropy.units.Quantity`, optional
Give the spatial lag values to compute the distribution at. The
default lag sizes are powers of 2 up to half the image size (so for a
128 by 128 image, the lags will be [1, 2, 4, 8, 16, 32, 64]).
distance : `~astropy.units.Quantity`, optional
Physical distance to the region in the data.
"""
__doc__ %= {"dtypes": " or ".join(common_types + twod_types)}
def __init__(self, img, header=None, lags=None, distance=None):
self.input_data_header(img, header)
if distance is not None:
self.distance = distance
if lags is None:
# Find the next smallest power of 2 from the smallest axis
max_power = \
np.floor(np.log2(min(self.data.shape) / 2.)).astype(int)
self.lags = [2**i for i in range(max_power + 1)] * u.pix
else:
self.lags = lags
@property
def lags(self):
'''
Lag values to calculate the statistics at.
'''
return self._lags
@lags.setter
def lags(self, values):
if not isinstance(values, u.Quantity):
raise TypeError("lags must be given as a astropy.units.Quantity.")
# Now make sure that we can convert into pixels before setting.
try:
pix_rad = self._to_pixel(values)
except Exception as e:
raise e
# The radius should be larger than a pixel
if np.any(pix_rad.value < 1):
raise ValueError("One of the chosen lags is smaller than one "
"pixel."
" Ensure that all lag values are larger than one "
"pixel.")
half_comp = (np.floor(pix_rad.value) - min(self.data.shape) / 2.)
if np.any(half_comp > 1e-10):
raise ValueError("At least one of the lags is larger than half of"
" the image size. Remove these lags from the "
"array.")
self._lags = values
def make_tsallis(self, periodic=True, num_bins=None):
'''
Calculate the Tsallis distribution at each lag.
We standardize each distribution such that it has a mean of zero and
variance of one before fitting.
If the lag values are fractions of a pixel when converted to pixel
units, the lag is rounded down to the next smallest integer value.
Parameters
----------
periodic : bool, optional
Use for simulations with periodic boundaries.
num_bins : int, optional
Number of bins to use in the histograms. Defaults to the
square-root of the number of finite points in the image.
'''
if num_bins is None:
num_bins = \
np.ceil(np.sqrt(np.isfinite(self.data).sum())).astype(int)
self._lag_arrays = np.empty((len(self.lags),
self.data.shape[0],
self.data.shape[1]))
self._lag_distribs = np.empty((len(self.lags), 2, num_bins))
# Convert the lags into pixels
pix_lags = np.floor(self._to_pixel(self.lags).value).astype(int)
for i, lag in enumerate(pix_lags):
if periodic:
pad_img = self.data
else:
pad_img = np.pad(self.data, lag, padwithzeros)
rolls = np.roll(pad_img, lag, axis=0) +\
np.roll(pad_img, -lag, axis=0) +\
np.roll(pad_img, lag, axis=1) +\
np.roll(pad_img, -lag, axis=1)
# Remove the padding
if periodic:
clip_resulting = (rolls / 4.) - pad_img
else:
clip_resulting = (rolls[lag:-lag, lag:-lag] / 4.) -\
pad_img[lag:-lag, lag:-lag]
# Normalize the data
data = standardize(clip_resulting)
# Ignore nans for the histogram
hist, bin_edges = np.histogram(data[~np.isnan(data)],
bins=num_bins)
bin_centres = (bin_edges[:-1] + bin_edges[1:]) / 2
normlog_hist = np.log10(hist / np.sum(hist, dtype="float"))
# Keep results
self._lag_arrays[i, :] = data
self._lag_distribs[i, 0, :] = bin_centres
self._lag_distribs[i, 1, :] = normlog_hist
@property
def lag_arrays(self):
'''
Arrays of the image computed at different lags.
'''
return self._lag_arrays
@property
def lag_distribs(self):
'''
Histogram bins and values compute from `~Tsallis.lag_arrays`. The
histogram values are in log10.
'''
return self._lag_distribs
def fit_tsallis(self, sigma_clip=5):
'''
Fit the Tsallis distributions.
Parameters
----------
sigma_clip : float
Sets the sigma value to clip data at. If `None`,
no clipping is performed on the data. Defaults to 5.
'''
if not hasattr(self, 'lag_distribs'):
raise Exception("Calculate the distributions first with "
"Tsallis.make_tsallis.")
self._sigma_clip = sigma_clip
self._tsallis_params = np.empty((len(self.lags), 3))
self._tsallis_stderrs = np.empty((len(self.lags), 3))
self._tsallis_chisq = np.empty((len(self.lags), 1))
for i, dist in enumerate(self.lag_distribs):
if sigma_clip is None:
# Keep all finite data
finite_mask = np.logical_and(np.isfinite(dist[0]),
np.isfinite(dist[1]))
clipped = dist[0][finite_mask], dist[1][finite_mask]
else:
clipped = clip_to_sigma(dist[0], dist[1], sigma=sigma_clip)
params, pcov = curve_fit(tsallis_function, clipped[0], clipped[1],
p0=(-np.max(clipped[1]), 1., 2.),
maxfev=100 * len(dist[0]))
fitted_vals = tsallis_function(clipped[0], *params)
self._tsallis_params[i] = params
self._tsallis_stderrs[i] = np.sqrt(np.diag(pcov))
self._tsallis_chisq[i] = np.sum((np.exp(fitted_vals) - np.exp(clipped[1]))**2. / np.exp(clipped[1]))
@property
def tsallis_params(self):
'''
Parameters of the Tsallis distribution fit at each lag value.
'''
return self._tsallis_params
@property
def tsallis_stderrs(self):
'''
Standard errors of the Tsallis distribution fit at each lag value.
'''
return self._tsallis_stderrs
@property
def tsallis_chisq(self):
'''
Reduced chi-squared values for the fit at each lag value.
'''
return self._tsallis_chisq
@property
def tsallis_table(self):
'''
Return the fit parameters, standard error, and chi-squared values as
an `~astropy.table.Table`.
'''
data = [self.lags] + [col for col in self.tsallis_params.T] + \
[col for col in self.tsallis_stderrs.T] + [self.tsallis_chisq]
names = ['lags', 'logA', 'w2', 'q', 'logA_stderr', 'w2_stderr',
'q_stderr', 'redchisq']
return Table(data, names=names)
def plot_parameters(self, save_name=None, **kwargs):
'''
Plot the fit parameters as a function of lag.
Parameters
----------
save_name : str,optional
Save name for the figure. Enables saving the plot.
kwargs : passed to `~matplotlib.pyplot.errorbar`.
'''
import matplotlib.pyplot as plt
fig, axes = plt.subplots(3, 1, sharex=True)
ax1 = axes[0]
ax1.errorbar(self.lags.value, self.tsallis_table['logA'],
yerr=self.tsallis_table['logA_stderr'],
**kwargs)
ax1.set_ylabel(r"log A")
ax1.grid()
ax2 = axes[1]
ax2.errorbar(self.lags.value, self.tsallis_table['w2'],
yerr=self.tsallis_table['w2_stderr'],
**kwargs)
ax2.set_ylabel(r"$w^2$")
ax2.grid()
ax3 = axes[2]
ax3.errorbar(self.lags.value, self.tsallis_table['q'],
yerr=self.tsallis_table['q_stderr'],
**kwargs)
ax3.set_ylabel(r"q")
ax3.grid()
if save_name is not None:
plt.savefig(save_name)
plt.close()
else:
plt.show()
def plot_fit(self, save_name=None, color='r',
fit_color='k'):
'''
Plot the distributions and fits to the Tsallis function.
Parameters
----------
save_name : str, optional
Save name for the figure. Enables saving the plot.
'''
import matplotlib.pyplot as plt
if fit_color is None:
fit_color = color
fig, axes = plt.subplots(len(self.lags), 1, sharex=True)
for vals in zip(self.lags, self.lag_distribs,
self.lag_arrays, self.tsallis_params,
axes):
lag, dist, arr, params, ax = vals
ax.plot(dist[0], dist[1], 'D', color=color,
label="Lag {}".format(lag), alpha=0.5)
ax.plot(dist[0], tsallis_function(dist[0], *params),
color=fit_color)
# Indicate which data was used for the fits.
# Only if sigma-clipping is applied.
if self._sigma_clip is not None:
ax.axvline(self._sigma_clip, color='r', linestyle='--',
alpha=0.7)
ax.axvline(-self._sigma_clip, color='r', linestyle='--',
alpha=0.7)
ax.legend(frameon=True, loc='best')
if save_name is not None:
plt.savefig(save_name)
plt.close()
else:
plt.show()
def run(self, verbose=False, num_bins=None, periodic=True, sigma_clip=5,
save_name=None):
'''
Run all steps.
Parameters
----------
verbose : bool, optional
Enables plotting.
num_bins : int, optional
Sets the number of bins to use in the lag histograms. Passed
to `~Tsallis.make_tsallis`.
periodic : bool, optional
Treat periodic boundaries. Passed
to `~Tsallis.make_tsallis`. Enabled by default.
sigma_clip : float
Sets the sigma value to clip data at.
Passed to :func:`fit_tsallis`.
save_name : str,optional
Save the figure when a file name is given.
'''
self.make_tsallis(num_bins=num_bins, periodic=periodic)
self.fit_tsallis(sigma_clip=sigma_clip)
if verbose:
# print the table of parameters
print(self.tsallis_table)
self.plot_fit(save_name=save_name)
return self
# class Tsallis_Distance(object):
# '''
# Distance Metric for the Tsallis Distribution.
# Parameters
# ----------
# array1 : %(dtypes)s
# 2D datas.
# array2 : %(dtypes)s
# 2D datas.
# lags : `~astropy.units.Quantity`
# Lags to calculate at.
# fiducial_model : Tsallis
# Computed Tsallis object. use to avoid recomputing.
# tsallis1_kwargs : dict, optional
# Pass kwargs to `~Tsallis.run` for array1.
# tsallis2_kwargs : dict, optional
# Pass kwargs to `~Tsallis.run` for array2.
# '''
# __doc__ %= {"dtypes": " or ".join(common_types + twod_types)}
# def __init__(self, array1, array2, lags=None, tsallis1_kwargs={},
# tsallis2_kwargs={}, fiducial_model=None,):
# super(Tsallis_Distance, self).__init__()
# if fiducial_model is not None:
# self.tsallis1 = fiducial_model
# else:
# self.tsallis1 = \
# Tsallis(array1, lags=lags).run(verbose=False,
# **tsallis1_kwargs)
# self.tsallis2 = \
# Tsallis(array2, lags=lags).run(verbose=False,
# **tsallis2_kwargs)
# self.distance = None
# def distance_metric(self, verbose=False, save_name=None):
# '''
# We do not consider the parameter a in the distance metric. Since we
# are fitting to a PDF, a is related to the number of data points and
# is therefore not a true measure of the differences between the data
# sets. The distance is computed by summing the squared difference of
# the parameters, normalized by the sums of the squares, for each lag.
    #     The total distance is the sum over the two parameters.
# Parameters
# ----------
# verbose : bool, optional
# Enables plotting.
# save_name : str,optional
# Save the figure when a file name is given.
# '''
# w1 = self.tsallis1.tsallis_params[:, 1]
# w2 = self.tsallis2.tsallis_params[:, 1]
# q1 = self.tsallis1.tsallis_params[:, 2]
# q2 = self.tsallis2.tsallis_params[:, 2]
# # diff_a = (a1-a2)**2.
# diff_w = (w1 - w2) ** 2. / (w1 ** 2. + w2 ** 2.)
# diff_q = (q1 - q2) ** 2. / (q1 ** 2. + q2 ** 2.)
# self.distance = np.sum(diff_w + diff_q)
# if verbose:
# import matplotlib.pyplot as p
# lags = self.tsallis1.lags
# p.plot(lags, diff_w, "rD", label="Difference of w")
# p.plot(lags, diff_q, "go", label="Difference of q")
# p.legend()
# p.xscale('log', basex=2)
# p.ylabel("Normalized Difference")
# p.xlabel("Lags (pixels)")
# p.grid(True)
# if save_name is not None:
# p.savefig(save_name)
# p.close()
# else:
# p.show()
# return self
def tsallis_function(x, *p):
'''
    Tsallis distribution function as given in Tofflemire et al. (2011),
    implemented in log form:
        f(x) = (-1 / (q - 1)) * (log10(1 + (q - 1) * x**2 / w**2) + logA)
    The expected parameters are log A, w^2, and q.
    Parameters
    ----------
    x : numpy.ndarray or list
        x-data
    p : list of floats
        The three parameter values: log A, w^2, and q.
'''
loga, wsquare, q = p
return (-1 / (q - 1)) * (np.log10(1 + (q - 1) *
(x ** 2. / wsquare)) + loga)
def clip_to_sigma(x, y, sigma=2):
'''
Clip to values between +/- sigma.
Parameters
----------
x : numpy.ndarray
x-data
    y : numpy.ndarray
        y-data
    sigma : float, optional
        Keep only finite points where ``y`` lies within +/- sigma.
        Defaults to 2.
'''
clip_mask = np.logical_and(y < sigma, y > -sigma)
# And ensure all data is finite for fitting
finite_mask = np.logical_and(np.isfinite(y), np.isfinite(x))
all_mask = np.logical_and(clip_mask, finite_mask)
return x[all_mask], y[all_mask]
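def _example_tsallis():  # pragma: no cover
    # Illustrative sketch, not part of the original module: run the Tsallis
    # statistic on an assumed random 2D field and return the fitted table,
    # mirroring the usage in the commented-out distance class above.
    # (Uses the module-level numpy / astropy.units imports.)
    img = np.random.randn(128, 128)
    tsallis = Tsallis(img, lags=[1, 2, 4, 8, 16] * u.pix)
    tsallis.run(verbose=False, sigma_clip=5)
    return tsallis.tsallis_table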
| mit |
dismalpy/dismalpy | dismalpy/ssm/tests/test_mlemodel.py | 1 | 25644 | """
Tests for the generic MLEModel
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from dismalpy.ssm import kalman_filter, kalman_smoother, sarimax, MLEModel
from dismalpy.ssm.mlemodel import MLEResultsWrapper
from dismalpy.ssm.tests import results_sarimax
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal, assert_raises
from nose.exc import SkipTest
from statsmodels.datasets import nile
current_path = os.path.dirname(os.path.abspath(__file__))
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
# Basic kwargs
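# (The kwargs below encode a univariate random walk observed without error:
#  y_t = alpha_t and alpha_{t+1} = alpha_t + eta_t with unit state variance,
#  initialized with an approximate diffuse prior.)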
kwargs = {
'k_states': 1, 'design': [[1]], 'transition': [[1]],
'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
def get_dummy_mod(fit=True, pandas=False):
    # This tests time-varying parameter regression in the case where the
    # parameters are not actually time-varying, so the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
if pandas:
index = pd.date_range('1960-01-01', periods=100, freq='MS')
endog = pd.TimeSeries(endog, index=index)
exog = pd.TimeSeries(exog, index=index)
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
if fit:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
else:
res = None
return mod, res
def test_wrapping():
# Test the wrapping of various Representation / KalmanFilter /
# KalmanSmoother methods / attributes
mod, _ = get_dummy_mod(fit=False)
# Test that we can get the design matrix
assert_equal(mod['design', 0, 0], 2.0 * np.arange(100))
# Test that we can set individual elements of the design matrix
mod['design', 0, 0, :] = 2
assert_equal(mod.ssm['design', 0, 0, :], 2)
assert_equal(mod.ssm['design'].shape, (1, 1, 100))
# Test that we can set the entire design matrix
mod['design'] = [[3.]]
assert_equal(mod.ssm['design', 0, 0], 3.)
# (Now it's no longer time-varying, so only 2-dim)
assert_equal(mod.ssm['design'].shape, (1, 1))
# Test that we can change the following properties: loglikelihood_burn,
# initial_variance, tolerance
assert_equal(mod.loglikelihood_burn, 1)
mod.loglikelihood_burn = 0
assert_equal(mod.ssm.loglikelihood_burn, 0)
assert_equal(mod.tolerance, mod.ssm.tolerance)
mod.tolerance = 0.123
assert_equal(mod.ssm.tolerance, 0.123)
assert_equal(mod.initial_variance, 1e10)
mod.initial_variance = 1e12
assert_equal(mod.ssm.initial_variance, 1e12)
# Test that we can use the following wrappers: initialization,
# initialize_known, initialize_stationary, initialize_approximate_diffuse
# Initialization starts off as none
assert_equal(mod.initialization, None)
# Since the SARIMAX model may be fully stationary or may have diffuse
# elements, it uses a custom initialization by default, but it can be
# overridden by users
mod.initialize_state()
# (The default initialization in this case is known because there is a non-
# stationary state corresponding to the time-varying regression parameter)
assert_equal(mod.initialization, 'known')
mod.initialize_approximate_diffuse(1e5)
assert_equal(mod.initialization, 'approximate_diffuse')
assert_equal(mod.ssm._initial_variance, 1e5)
mod.initialize_known([5.], [[40]])
assert_equal(mod.initialization, 'known')
assert_equal(mod.ssm._initial_state, [5.])
assert_equal(mod.ssm._initial_state_cov, [[40]])
mod.initialize_stationary()
assert_equal(mod.initialization, 'stationary')
# Test that we can use the following wrapper methods: set_filter_method,
# set_stability_method, set_conserve_memory, set_smoother_output
# The defaults are as follows:
assert_equal(mod.ssm.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(mod.ssm.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(mod.ssm.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
assert_equal(mod.ssm.smoother_output, kalman_smoother.SMOOTHER_ALL)
# Now, create the Cython filter object and assert that they have
# transferred correctly
mod.ssm._initialize_filter()
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# (the smoother object is so far not in Cython, so there is no
# transferring)
# Change the attributes in the model class
mod.set_filter_method(100)
mod.set_stability_method(101)
mod.set_conserve_memory(102)
mod.set_smoother_output(103)
# Assert that the changes have occurred in the ssm class
assert_equal(mod.ssm.filter_method, 100)
assert_equal(mod.ssm.stability_method, 101)
assert_equal(mod.ssm.conserve_memory, 102)
assert_equal(mod.ssm.smoother_output, 103)
# Assert that the changes have *not yet* occurred in the filter object
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# Re-initialize the filter object (this would happen automatically anytime
# loglike, filter, etc. were called)
# In this case, an error will be raised since filter_method=100 is not
# valid
# assert_raises(NotImplementedError, mod.ssm._initialize_filter)
# Now, test the setting of the other two methods by resetting the
# filter method to a valid value
mod.set_filter_method(1)
mod.ssm._initialize_filter()
# Retrieve the new kalman filter object (a new object had to be created
# due to the changing filter method)
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, 1)
assert_equal(kf.stability_method, 101)
assert_equal(kf.conserve_memory, 102)
def test_fit_misc():
true = results_sarimax.wpi1_stationary
endog = np.diff(true['data'])[1:]
mod = sarimax.SARIMAX(endog, order=(1,0,1), trend='c')
# Test optim_hessian={'opg','oim','cs'}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = mod.fit(method='ncg', disp=True, optim_hessian='opg')
res2 = mod.fit(method='ncg', disp=True, optim_hessian='oim')
res3 = mod.fit(method='ncg', disp=True, optim_hessian='cs')
assert_raises(NotImplementedError, mod.fit, method='ncg', disp=False, optim_hessian='a')
# Check that the Hessians broadly result in the same optimum
assert_allclose(res1.llf, res2.llf, rtol=1e-2)
assert_allclose(res1.llf, res3.llf, rtol=1e-2)
# Test return_params=True
mod, _ = get_dummy_mod(fit=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res_params = mod.fit(disp=-1, return_params=True)
assert_almost_equal(res_params, [0,0], 7)
def test_score_misc():
mod, res = get_dummy_mod()
# Test that the score function works
mod.score(res.params)
def test_from_formula():
assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1,2,3))
def test_cov_params():
mod, res = get_dummy_mod()
# Smoke test for each of the covariance types
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(res.params, disp=-1, cov_type='cs')
assert_equal(res.cov_type, 'cs')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical (complex-step) differentiation.')
res = mod.fit(res.params, disp=-1, cov_type='delta')
assert_equal(res.cov_type, 'delta')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical differentiation and the delta method (method of propagation of errors) applied to the parameter transformation function.')
res = mod.fit(res.params, disp=-1, cov_type='oim')
assert_equal(res.cov_type, 'oim')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the observed information matrix described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='opg')
assert_equal(res.cov_type, 'opg')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the outer product of gradients.')
res = mod.fit(res.params, disp=-1, cov_type='robust')
assert_equal(res.cov_type, 'robust')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_oim')
assert_equal(res.cov_type, 'robust_oim')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_cs')
assert_equal(res.cov_type, 'robust_cs')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using numerical (complex-step) differentiation.')
assert_raises(NotImplementedError, mod.fit, res.params, disp=-1, cov_type='invalid_cov_type')
def test_transform():
# The transforms in MLEModel are noops
mod = MLEModel([1,2], **kwargs)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [2, 3])
assert_allclose(mod.untransform_params([2, 3]), [2, 3])
# Smoke test for transformation in `filter`, `update`, `loglike`,
# `loglikeobs`
mod.filter([], transformed=False)
mod.update([], transformed=False)
mod.loglike([], transformed=False)
mod.loglikeobs([], transformed=False)
# Note that mod is an SARIMAX instance, and the two parameters are
# variances
mod, _ = get_dummy_mod(fit=False)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [4, 9])
assert_allclose(mod.untransform_params([4, 9]), [2, 3])
# Test transformation in `filter`
res = mod.filter([2, 3], transformed=True)
assert_allclose(res.params, [2, 3])
res = mod.filter([2, 3], transformed=False)
assert_allclose(res.params, [4, 9])
def test_filter():
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
# Test return of ssm object
res = mod.filter([], return_ssm=True)
assert_equal(isinstance(res, kalman_filter.FilterResults), True)
# Test return of full results object
res = mod.filter([])
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'opg')
# Test return of full results object, specific covariance type
res = mod.filter([], cov_type='oim')
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'oim')
def test_params():
mod = MLEModel([1,2], **kwargs)
# By default start_params raises NotImplementedError
assert_raises(NotImplementedError, lambda: mod.start_params)
# But param names are by default an empty array
assert_equal(mod.param_names, [])
# We can set them in the object if we want
mod._start_params = [1]
mod._param_names = ['a']
assert_equal(mod.start_params, [1])
assert_equal(mod.param_names, ['a'])
def check_results(pandas):
mod, res = get_dummy_mod(pandas=pandas)
# Test fitted values
assert_almost_equal(res.fittedvalues[2:], mod.endog[2:].squeeze())
# Test residuals
assert_almost_equal(res.resid[2:], np.zeros(mod.nobs-2))
# Test loglikelihood_burn
assert_equal(res.loglikelihood_burn, 1)
def test_results(pandas=False):
check_results(pandas=False)
check_results(pandas=True)
def test_predict():
dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
endog = pd.TimeSeries([1,2], index=dates)
mod = MLEModel(endog, **kwargs)
res = mod.filter([])
# Test that predict with start=None, end=None does prediction with full
# dataset
predict = res.predict()
assert_equal(predict.shape, (mod.nobs,))
assert_allclose(res.get_prediction().predicted_mean, predict)
# Test a string value to the dynamic option
assert_allclose(res.predict(dynamic='1981-01-01'), res.predict())
# Test an invalid date string value to the dynamic option
assert_raises(ValueError, res.predict, dynamic='1982-01-01')
# Test for passing a string to predict when dates are not set
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
assert_raises(ValueError, res.predict, dynamic='string')
def test_forecast():
# Numpy
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
forecast = res.forecast(steps=10)
assert_allclose(forecast, np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, forecast)
# Pandas
index = pd.date_range('1960-01-01', periods=2, freq='MS')
mod = MLEModel(pd.Series([1,2], index=index), **kwargs)
res = mod.filter([])
assert_allclose(res.forecast(steps=10), np.ones((10,)) * 2)
assert_allclose(res.forecast(steps='1960-12-01'), np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, np.ones((10,)) * 2)
def test_summary():
dates = pd.date_range(start='1980-01-01', end='1984-01-01', freq='AS')
endog = pd.TimeSeries([1,2,3,4,5], index=dates)
mod = MLEModel(endog, **kwargs)
res = mod.filter([])
# Get the summary
txt = str(res.summary())
# Test res.summary when the model has dates
assert_equal(re.search('Sample:\s+01-01-1980', txt) is not None, True)
assert_equal(re.search('\s+- 01-01-1984', txt) is not None, True)
# Test res.summary when `model_name` was not provided
assert_equal(re.search('Model:\s+MLEModel', txt) is not None, True)
def check_endog(endog, nobs=2, k_endog=1, **kwargs):
# create the model
mod = MLEModel(endog, **kwargs)
# the data directly available in the model is the Statsmodels version of
# the data; it should be 2-dim, C-contiguous, long-shaped:
# (nobs, k_endog) == (2, 1)
assert_equal(mod.endog.ndim, 2)
assert_equal(mod.endog.flags['C_CONTIGUOUS'], True)
assert_equal(mod.endog.shape, (nobs, k_endog))
# the data in the `ssm` object is the state space version of the data; it
# should be 2-dim, F-contiguous, wide-shaped (k_endog, nobs) == (1, 2)
# and it should share data with mod.endog
assert_equal(mod.ssm.endog.ndim, 2)
assert_equal(mod.ssm.endog.flags['F_CONTIGUOUS'], True)
assert_equal(mod.ssm.endog.shape, (k_endog, nobs))
assert_equal(mod.ssm.endog.base is mod.endog, True)
return mod
def test_basic_endog():
# Test various types of basic python endog inputs (e.g. lists, scalars...)
# Check cannot call with non-array-like
# fails due to checks in Statsmodels base classes
assert_raises(ValueError, MLEModel, endog=1, k_states=1)
assert_raises(ValueError, MLEModel, endog='a', k_states=1)
assert_raises(ValueError, MLEModel, endog=True, k_states=1)
# Check behavior with different types
mod = MLEModel([1], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel([1.], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel([True], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel(['a'], **kwargs)
# raises error due to inability coerce string to numeric
assert_raises(ValueError, mod.filter, [])
    # Check that different iterable types give the expected result
endog = [1.,2.]
mod = check_endog(endog, **kwargs)
mod.filter([])
endog = [[1.],[2.]]
mod = check_endog(endog, **kwargs)
mod.filter([])
endog = (1.,2.)
mod = check_endog(endog, **kwargs)
mod.filter([])
def test_numpy_endog():
# Test various types of numpy endog inputs
# Check behavior of the link maintained between passed `endog` and
# `mod.endog` arrays
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
assert_equal(mod.endog.base is not mod.data.orig_endog, True)
assert_equal(mod.endog.base is not endog, True)
assert_equal(mod.data.orig_endog.base is not endog, True)
endog[0] = 2
# there is no link to mod.endog
assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
# there remains a link to mod.data.orig_endog
assert_equal(mod.data.orig_endog, endog)
# Check behavior with different memory layouts / shapes
# Example (failure): 0-dim array
endog = np.array(1.)
# raises error due to len(endog) failing in Statsmodels base classes
assert_raises(TypeError, check_endog, endog, **kwargs)
# Example : 1-dim array, both C- and F-contiguous, length 2
endog = np.array([1.,2.])
assert_equal(endog.ndim, 1)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (2,))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, long-shaped: (nobs, k_endog)
endog = np.array([1., 2.]).reshape(2, 1)
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
assert_equal(endog.flags['F_CONTIGUOUS'], False)
assert_equal(endog.shape, (2, 1))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, wide-shaped: (k_endog, nobs)
endog = np.array([1., 2.]).reshape(1, 2)
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
assert_equal(endog.flags['F_CONTIGUOUS'], False)
assert_equal(endog.shape, (1, 2))
# raises error because arrays are always interpreted as
    # (nobs, k_endog), which means that k_endog=2 is incompatible with shape
# of design matrix (1, 1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : 2-dim array, F-contiguous, long-shaped (nobs, k_endog)
endog = np.array([1., 2.]).reshape(1, 2).transpose()
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], False)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (2, 1))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, F-contiguous, wide-shaped (k_endog, nobs)
endog = np.array([1., 2.]).reshape(2, 1).transpose()
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], False)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (1, 2))
# raises error because arrays are always interpreted as
    # (nobs, k_endog), which means that k_endog=2 is incompatible with shape
# of design matrix (1, 1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example (failure): 3-dim array
endog = np.array([1., 2.]).reshape(2, 1, 1)
# raises error due to direct ndim check in Statsmodels base classes
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : np.array with 2 columns
# Update kwargs for k_endog=2
kwargs2 = {
'k_states': 1, 'design': [[1], [0.]], 'obs_cov': [[1, 0], [0, 1]],
'transition': [[1]], 'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
endog = np.array([[1., 2.], [3., 4.]])
mod = check_endog(endog, k_endog=2, **kwargs2)
mod.filter([])
def test_pandas_endog():
# Test various types of pandas endog inputs (e.g. TimeSeries, etc.)
# Example (failure): pandas.Series, no dates
endog = pd.Series([1., 2.])
# raises error due to no dates
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : pandas.Series
dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
endog = pd.Series([1., 2.], index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : pandas.Series, string datatype
endog = pd.Series(['a'], index=dates)
# raises error due to direct type casting check in Statsmodels base classes
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : pandas.TimeSeries
endog = pd.TimeSeries([1., 2.], index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : pandas.DataFrame with 1 column
endog = pd.DataFrame({'a': [1., 2.]}, index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example (failure): pandas.DataFrame with 2 columns
endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates)
# raises error because 2-columns means k_endog=2, but the design matrix
# set in **kwargs is shaped (1,1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Check behavior of the link maintained between passed `endog` and
# `mod.endog` arrays
endog = pd.DataFrame({'a': [1., 2.]}, index=dates)
mod = check_endog(endog, **kwargs)
assert_equal(mod.endog.base is not mod.data.orig_endog, True)
assert_equal(mod.endog.base is not endog, True)
assert_equal(mod.data.orig_endog.values.base is not endog, True)
endog.iloc[0, 0] = 2
# there is no link to mod.endog
assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
# there remains a link to mod.data.orig_endog
assert_allclose(mod.data.orig_endog, endog)
# Example : pandas.DataFrame with 2 columns
# Update kwargs for k_endog=2
kwargs2 = {
'k_states': 1, 'design': [[1], [0.]], 'obs_cov': [[1, 0], [0, 1]],
'transition': [[1]], 'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates)
mod = check_endog(endog, k_endog=2, **kwargs2)
mod.filter([])
def test_diagnostics():
mod, res = get_dummy_mod()
# Make sure method=None selects the appropriate test
actual = res.test_normality(method=None)
desired = res.test_normality(method='jarquebera')
assert_allclose(actual, desired)
actual = res.test_heteroskedasticity(method=None)
desired = res.test_heteroskedasticity(method='breakvar')
assert_allclose(actual, desired)
actual = res.test_serial_correlation(method=None)
desired = res.test_serial_correlation(method='ljungbox')
assert_allclose(actual, desired)
def test_diagnostics_nile_eviews():
# Test the diagnostic tests using the Nile dataset. Results are from
# "Fitting State Space Models with EViews" (Van den Bossche 2011,
# Journal of Statistical Software).
# For parameter values, see Figure 2
# For Ljung-Box and Jarque-Bera statistics and p-values, see Figure 5
# The Heteroskedasticity statistic is not provided in this paper.
niledata = nile.data.load_pandas().data
niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
mod = MLEModel(niledata['volume'], k_states=1,
initialization='approximate_diffuse', initial_variance=1e15,
loglikelihood_burn=1)
mod.ssm['design', 0, 0] = 1
mod.ssm['obs_cov', 0, 0] = np.exp(9.600350)
mod.ssm['transition', 0, 0] = 1
mod.ssm['selection', 0, 0] = 1
mod.ssm['state_cov', 0, 0] = np.exp(7.348705)
res = mod.filter([])
# Test Ljung-Box
# Note: only 3 digits provided in the reference paper
actual = res.test_serial_correlation(method='ljungbox', lags=10)[0, :, -1]
assert_allclose(actual, [13.117, 0.217], atol=1e-3)
# Test Jarque-Bera
actual = res.test_normality(method='jarquebera')[0, :2]
assert_allclose(actual, [0.041686, 0.979373], atol=1e-5)
def test_diagnostics_nile_durbinkoopman():
# Test the diagnostic tests using the Nile dataset. Results are from
# Durbin and Koopman (2012); parameter values reported on page 37; test
# statistics on page 40
niledata = nile.data.load_pandas().data
niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
mod = MLEModel(niledata['volume'], k_states=1,
initialization='approximate_diffuse', initial_variance=1e15,
loglikelihood_burn=1)
mod.ssm['design', 0, 0] = 1
mod.ssm['obs_cov', 0, 0] = 15099.
mod.ssm['transition', 0, 0] = 1
mod.ssm['selection', 0, 0] = 1
mod.ssm['state_cov', 0, 0] = 1469.1
res = mod.filter([])
# Test Ljung-Box
# Note: only 3 digits provided in the reference paper
actual = res.test_serial_correlation(method='ljungbox', lags=9)[0, 0, -1]
assert_allclose(actual, [8.84], atol=1e-2)
# Test Jarque-Bera
# Note: The book reports 0.09 for Kurtosis, because it is reporting the
# statistic less the mean of the Kurtosis distribution (which is 3).
norm = res.test_normality(method='jarquebera')[0]
actual = [norm[0], norm[2], norm[3]]
assert_allclose(actual, [0.05, -0.03, 3.09], atol=1e-2)
# Test Heteroskedasticity
# Note: only 2 digits provided in the book
actual = res.test_heteroskedasticity(method='breakvar')[0, 0]
assert_allclose(actual, [0.61], atol=1e-2)
| bsd-2-clause |
sahilTakiar/spark | python/pyspark/sql/dataframe.py | 4 | 90410 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
import warnings
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import ArrowStreamSerializer, BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import IntegralType
from pyspark.sql.types import *
from pyspark.util import _exception_message
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.", DeprecationWarning)
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
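    # A minimal usage sketch for the writer interface (the output path below is
    # a hypothetical example, not part of this module):
    #
    #   >>> df.write.mode("overwrite").parquet("/tmp/people.parquet")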
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
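    # A minimal usage sketch, assuming ``sdf`` is a streaming DataFrame created
    # from a streaming source (e.g. via ``spark.readStream``):
    #
    #   >>> query = sdf.writeStream.format("console").start()
    #   >>> query.stop()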
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(2.4)
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
        :class:`DataStreamWriter`. Methods that return a single answer (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
"""
return self._jdf.isStreaming()
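    # A minimal sketch, assuming an active :class:`SparkSession` named ``spark``:
    #
    #   >>> spark.readStream.format("rate").load().isStreaming
    #   True
    #   >>> spark.range(1).isStreaming
    #   False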
@since(1.3)
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to True, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
            and aligns cells right.
:param vertical: If set to True, print output rows vertically (one line
per column value).
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a dataframe with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
import cgi
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: cgi.escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: cgi.escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this DataFrame, which is especially useful in iterative algorithms where the
plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with L{SparkContext.setCheckpointDir()}.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.3)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this DataFrame, which is especially useful in iterative
algorithms where the plan may grow exponentially. Local checkpoints are stored in the
executors using the caching subsystem and therefore they are not reliable.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
        :param delayThreshold: the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
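    # A sketch of a watermarked windowed aggregation, assuming ``sdf`` is a
    # streaming DataFrame with an event-time column named ``time``:
    #
    #   >>> from pyspark.sql.functions import window
    #   >>> counts = (sdf.withWatermark('time', '10 minutes')
    #   ...              .groupBy(window('time', '5 minutes'))
    #   ...              .count())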
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current DataFrame.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
for p in parameters:
if not isinstance(p, str):
raise TypeError(
"all parameters should be str, got {0} of type {1}".format(p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator()
return _load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
        If no storage level is specified, it defaults to (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
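    # A minimal sketch of persisting with an explicit storage level and then
    # releasing the cached data (``df2`` as in the doctests above):
    #
    #   >>> from pyspark import StorageLevel
    #   >>> df2 = df2.persist(StorageLevel.DISK_ONLY)
    #   >>> df2 = df2.unpersist()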
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
:param numPartitions: int, to specify the target number of partitions
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since("2.4.0")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is range partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
        .. note:: `fraction` is required and `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
4
>>> df.sample(fraction=0.5, seed=3).count()
4
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
        # sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
"""
if not isinstance(col, basestring):
raise ValueError("col must be a string, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@since(2.3)
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
:param colName: string, column name specified as a regex.
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, basestring):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``full_outer``, ``left``, ``left_outer``, ``right``, ``right_outer``,
``left_semi``, and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
Use summary for expanded statistics and control over which statistics to compute.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since("2.3.0")
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
        - arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See also describe for basic statistics.
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
        (shorthand for ``df.groupBy().agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
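    # A minimal sketch of the by-position resolution, assuming an active
    # :class:`SparkSession` named ``spark``:
    #
    #   >>> df1 = spark.createDataFrame([(1, 2)], ["a", "b"])
    #   >>> df2 = spark.createDataFrame([(3, 4)], ["b", "a"])
    #   >>> df1.union(df2).collect()   # second row is matched by position, not name
    #   [Row(a=1, b=2), Row(a=3, b=4)]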
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
.. note:: Deprecated in 2.0, use :func:`union` instead.
"""
warnings.warn("Deprecated in 2.0, use union instead.", DeprecationWarning)
return self.union(other)
@since(2.3)
def unionByName(self, other):
""" Returns a new :class:`DataFrame` containing union of rows in this and another frame.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
"""
return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
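    # A minimal sketch, assuming an active :class:`SparkSession` named ``spark``
    # (note that the result is deduplicated, like SQL ``INTERSECT``):
    #
    #   >>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
    #   >>> df2 = spark.createDataFrame([("a", 1), ("c", 4)], ["C1", "C2"])
    #   >>> df1.intersect(df2).collect()
    #   [Row(C1=u'a', C2=1)]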
@since(2.4)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this dataframe and other
dataframe while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL.
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT DISTINCT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
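    # A minimal sketch, assuming an active :class:`SparkSession` named ``spark``
    # (like SQL ``EXCEPT DISTINCT``, the result is deduplicated):
    #
    #   >>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
    #   >>> df2 = spark.createDataFrame([("a", 1)], ["C1", "C2"])
    #   >>> df1.subtract(df2).collect()
    #   [Row(C1=u'b', C2=3)]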
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
        duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be, and the system will limit the state accordingly. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
            If specified, drop rows that have fewer than `thresh` non-null values.
            This overrides the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, bool or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, bool, dict)):
raise ValueError("value should be a float, int, long, string, bool or dict")
        # Note that bool is a subclass of int, so isinstance(True, int) is True,
        # but we don't want to convert bools to floats
if not isinstance(value, bool) and isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
        or strings. The value can be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
        an arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
:param value: bool, int, long, float, string, list or None.
The replacement value must be a bool, int, long, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(basestring)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(basestring)
all_of_numeric = all_of((float, int, long))
# Validate input types
valid_types = (bool, float, int, long, basestring, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, long, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, long, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, basestring))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, long, basestring)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, basestring):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
        presented in [[http://dx.doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
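        A minimal illustrative sketch (not part of the original docs), assuming the
        doctest DataFrame ``df`` whose integer ``age`` values are 2 and 5:
        >>> df.approxQuantile("age", [0.0, 0.5, 1.0], 0.01)  # doctest: +SKIP
        [2.0, 2.0, 5.0]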
"""
if not isinstance(col, (basestring, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, basestring)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, basestring):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
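        A minimal illustrative sketch (hypothetical derived column; the Pearson
        correlation of a column with a linear transform of itself is exactly 1.0):
        >>> df.withColumn('age2', df.age * 2).corr('age', 'age2')  # doctest: +SKIP
        1.0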
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
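        A minimal illustrative sketch (hypothetical usage; the covariance of a
        column with itself equals its sample variance):
        >>> df.cov('age', 'age')  # doctest: +SKIP
        4.5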
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
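        A minimal illustrative sketch (hypothetical usage; row and column order
        are not guaranteed):
        >>> df.crosstab('age', 'name').collect()  # doctest: +SKIP
        [Row(age_name=u'2', Alice=1, Bob=0), Row(age_name=u'5', Alice=0, Bob=1)]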
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
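        A minimal illustrative sketch (hypothetical usage; the result column is
        named ``<col>_freqItems`` and its exact contents may vary):
        >>> df.freqItems(['age'], support=0.5).collect()  # doctest: +SKIP
        [Row(age_freqItems=[2, 5])]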
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this DataFrame; attempting to add
a column from some other dataframe will raise an error.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
:param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""
Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. note:: This method should only be used if the resulting Pandas's DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
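        A hedged configuration sketch (assuming an active ``spark`` session) for
        enabling the Arrow-based conversion referenced above:
        >>> spark.conf.set("spark.sql.execution.arrow.enabled", "true")  # doctest: +SKIP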
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
if self.sql_ctx._conf.pandasRespectSessionTimeZone():
timezone = self.sql_ctx._conf.sessionLocalTimeZone()
else:
timezone = None
if self.sql_ctx._conf.arrowEnabled():
use_arrow = True
try:
from pyspark.sql.types import to_arrow_schema
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
to_arrow_schema(self.schema)
except Exception as e:
if self.sql_ctx._conf.arrowFallbackEnabled():
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
use_arrow = False
else:
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
"false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
# Try to use Arrow optimization when the schema is supported and the required version
# of PyArrow is found, if 'spark.sql.execution.arrow.enabled' is enabled.
if use_arrow:
try:
from pyspark.sql.types import _check_dataframe_convert_date, \
_check_dataframe_localize_timestamps
import pyarrow
batches = self._collectAsArrow()
if len(batches) > 0:
table = pyarrow.Table.from_batches(batches)
pdf = table.to_pandas()
pdf = _check_dataframe_convert_date(pdf, self.schema)
return _check_dataframe_localize_timestamps(pdf, timezone)
else:
return pd.DataFrame.from_records([], columns=self.columns)
except Exception as e:
# We might have to allow fallback here as well but multiple Spark jobs can
# be executed. So, simply fail in this case for now.
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and can not continue. Note that "
"'spark.sql.execution.arrow.fallback.enabled' does not have an effect "
"on failures in the middle of computation.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
# Below is toPandas without Arrow optimization.
pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
dtype = {}
for field in self.schema:
pandas_type = _to_corrected_pandas_type(field.dataType)
# SPARK-21766: if an integer field is nullable and has null values, it can be
# inferred by pandas as float column. Once we convert the column with NaN back
# to integer type e.g., np.int16, we will hit exception. So we use the inferred
# float type, not the corrected type from the schema in this case.
if pandas_type is not None and \
not(isinstance(field.dataType, IntegralType) and field.nullable and
pdf[field.name].isnull().any()):
dtype[field.name] = pandas_type
for f, t in dtype.items():
pdf[f] = pdf[f].astype(t, copy=False)
if timezone is None:
return pdf
else:
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
for field in self.schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
pdf[field.name] = \
_check_series_convert_timestamps_local_tz(pdf[field.name], timezone)
return pdf
def _collectAsArrow(self):
"""
        Returns all records as a list of ArrowRecordBatches; pyarrow must be installed
and available on driver and worker Python environments.
.. note:: Experimental.
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectAsArrowToPython()
return list(_load_from_socket(sock_info, ArrowStreamSerializer()))
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
def _to_corrected_pandas_type(dt):
"""
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.
    This method gets the corrected data type for Pandas if that type may be inferred incorrectly.
"""
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == FloatType:
return np.float32
else:
return None
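# Illustrative behaviour sketch (comment only, not executed): for example,
# _to_corrected_pandas_type(ShortType()) returns np.int16, while unhandled types
# such as StringType() fall through to None so pandas keeps its own inferred dtype.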
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
from pyspark.sql.functions import from_unixtime
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),
Row(name='Bob', spy=None, age=5),
Row(name='Mallory', spy=True, age=None)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
AnasGhrab/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/units/ellipse_with_units.py | 9 | 1809 | """
Compare the ellipse generated with arcs versus a polygonal approximation
"""
from basic_units import cm
import numpy as np
from matplotlib import patches
import matplotlib.pyplot as plt
xcenter, ycenter = 0.38*cm, 0.52*cm
#xcenter, ycenter = 0., 0.
width, height = 1e-1*cm, 3e-1*cm
angle = -30
theta = np.arange(0.0, 360.0, 1.0)*np.pi/180.0
x = 0.5 * width * np.cos(theta)
y = 0.5 * height * np.sin(theta)
rtheta = angle*np.pi/180.
R = np.array([
[np.cos(rtheta), -np.sin(rtheta)],
[np.sin(rtheta), np.cos(rtheta)],
])
x, y = np.dot(R, np.array([x, y]))
x += xcenter
y += ycenter
fig = plt.figure()
ax = fig.add_subplot(211, aspect='auto')
ax.fill(x, y, alpha=0.2, facecolor='yellow', edgecolor='yellow', linewidth=1, zorder=1)
e1 = patches.Ellipse((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e1)
ax = fig.add_subplot(212, aspect='equal')
ax.fill(x, y, alpha=0.2, facecolor='green', edgecolor='green', zorder=1)
e2 = patches.Ellipse((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e2)
#fig.savefig('ellipse_compare.png')
fig.savefig('ellipse_compare')
fig = plt.figure()
ax = fig.add_subplot(211, aspect='auto')
ax.fill(x, y, alpha=0.2, facecolor='yellow', edgecolor='yellow', linewidth=1, zorder=1)
e1 = patches.Arc((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e1)
ax = fig.add_subplot(212, aspect='equal')
ax.fill(x, y, alpha=0.2, facecolor='green', edgecolor='green', zorder=1)
e2 = patches.Arc((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e2)
#fig.savefig('arc_compare.png')
fig.savefig('arc_compare')
plt.show()
| mit |
icdishb/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
seekshreyas/obidroid | checkApps.py | 1 | 3235 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@author: Shreyas <[email protected]>
CheckApps
=========
Given an input of apps, check whether each app exists in the store
"""
from __future__ import division
from optparse import OptionParser
import time
import datetime
import requests
import MySQLdb
import pandas as pd
import numpy as np
import pandas.io.sql as psql
def getUserInput():
"""
Get User Input
"""
optionparser = OptionParser()
optionparser.add_option('-f', '--file', dest='file')
optionparser.add_option('-d', '--db', dest='db')
optionparser.add_option('-s', '--server', dest='server')
optionparser.add_option('-u', '--username', dest='un')
optionparser.add_option('-p', '--password', dest='pw')
(option, args) = optionparser.parse_args()
#if not option.file:
# return optionparser.error('Data File path not provided.\n Usage: --file="path.to.appData"')
#elif not option
return {
'file': option.file,
'db': option.db,
'server': option.server,
'username': option.un,
'password': option.pw
}
def checkApps(df):
"""
Input a dataframe of app ids and check if the apps exist
in the store
"""
def getApp(id):
        relaxtime = 1 # pause (in seconds) between requests to avoid hammering the store
time.sleep(relaxtime)
print 'I\'m checking: ' + id
r = requests.get(stores['google']+id)
print id, r.status_code
return r.status_code
stores = {
'google' :'https://play.google.com//store/apps/details?id='
}
print df.head(10)
# df['appId'].apply(getApp)
df['status'] = df['package'].apply(getApp)
return df
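# Illustrative usage sketch (hypothetical package id; not part of the original script):
# sample_df = pd.DataFrame({'package': ['com.example.fakeapp']})
# sample_df = checkApps(sample_df) # adds a 'status' column of HTTP status codes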
def getDataframeFromDatabase(host, db, un, pw):
#query = "SELECT package from potential_unfair_apps LIMIT 1000;"
query = "SELECT package from potential_unfair_apps;"
print query
conn = MySQLdb.connect(host = host, user = un, passwd = pw, db = db)
unfair_apps_df = psql.frame_query(query, conn)
return unfair_apps_df
def pushDataframeToDatabase(df, host, db, un, pw, date):
print "Date: ", date
table_name = 'potential_unfair_apps_' + date
print "Database name: ", table_name
conn = MySQLdb.connect(host = host, user = un, passwd = pw, db = db)
df.to_sql(con=conn, name=table_name, if_exists='append', flavor='mysql')
def main():
userInput = getUserInput()
unfair_df = None
timestamp = time.time()
date_str = datetime.datetime.fromtimestamp(timestamp).strftime("%Y%m%d_%H%M%S")
print userInput
if userInput['file'] != None:
ptl_unfair_df = pd.read_csv(userInput['file'])
else:
ptl_unfair_df = getDataframeFromDatabase(userInput['server'], userInput['db'],
userInput['username'], userInput['password'])
# split dataframe to a manageable size
ptl_unfair_df_list = np.array_split(ptl_unfair_df, len(ptl_unfair_df)/100)
for x in range(len(ptl_unfair_df_list)):
print 'processing batch ' + str(x)
unfair_df = checkApps(ptl_unfair_df_list[x])
pushDataframeToDatabase(unfair_df, userInput['server'], userInput['db'],
userInput['username'], userInput['password'], date_str)
if __name__ == '__main__':
main()
| mit |
garydoranjr/misvm | misvm/svm.py | 1 | 5217 | """
Implements a standard SVM
"""
from __future__ import print_function, division
import numpy as np
from misvm.quadprog import quadprog
from misvm.kernel import by_name as kernel_by_name
from misvm.util import spdiag
from sklearn.base import ClassifierMixin, BaseEstimator
class SVM(ClassifierMixin, BaseEstimator):
"""
A standard supervised SVM implementation.
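    A minimal usage sketch (hypothetical data; labels are expected to be -1/+1)::
        import numpy as np
        X = np.vstack([np.random.randn(10, 2) - 2.0, np.random.randn(10, 2) + 2.0])
        y = np.array([-1.0] * 10 + [1.0] * 10)
        clf = SVM(kernel='rbf', C=1.0, gamma=0.5, verbose=False)
        clf.fit(X, y)
        scores = clf.predict(X)  # real-valued outputs; threshold at zero for labels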
"""
def __init__(self, kernel='linear', C=1.0, p=3, gamma=1e0, scale_C=True,
verbose=True, sv_cutoff=1e-7):
"""
@param kernel : the desired kernel function; can be linear, quadratic,
polynomial, or rbf [default: linear]
@param C : the loss/regularization tradeoff constant [default: 1.0]
@param scale_C : if True [default], scale C by the number of examples
@param p : polynomial degree when a 'polynomial' kernel is used
[default: 3]
@param gamma : RBF scale parameter when an 'rbf' kernel is used
[default: 1.0]
@param verbose : print optimization status messages [default: True]
@param sv_cutoff : the numerical cutoff for an example to be considered
a support vector [default: 1e-7]
"""
self.kernel = kernel
self.C = C
self.gamma = gamma
self.p = p
self.scale_C = scale_C
self.verbose = verbose
self.sv_cutoff = sv_cutoff
self._X = None
self._y = None
self._objective = None
self._alphas = None
self._sv = None
self._sv_alphas = None
self._sv_X = None
self._sv_y = None
self._b = None
self._predictions = None
def fit(self, X, y):
"""
@param X : an n-by-m array-like object containing n examples with m
features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._X = np.asmatrix(X)
self._y = np.asmatrix(y).reshape((-1, 1))
if self.scale_C:
C = self.C / float(len(self._X))
else:
C = self.C
K, H, f, A, b, lb, ub = self._setup_svm(self._X, self._y, C)
# Solve QP
self._alphas, self._objective = quadprog(H, f, A, b, lb, ub,
self.verbose)
self._compute_separator(K)
def _compute_separator(self, K):
self._sv = np.nonzero(self._alphas.flat > self.sv_cutoff)
self._sv_alphas = self._alphas[self._sv]
self._sv_X = self._X[self._sv]
self._sv_y = self._y[self._sv]
n = len(self._sv_X)
if n == 0:
self._b = 0.0
self._predictions = np.zeros(len(self._X))
else:
_sv_all_K = K[self._sv]
_sv_K = _sv_all_K.T[self._sv].T
e = np.matrix(np.ones((n, 1)))
D = spdiag(self._sv_y)
self._b = float(e.T * D * e - self._sv_alphas.T * D * _sv_K * e) / n
self._predictions = np.array(self._b
+ self._sv_alphas.T * D * _sv_all_K).reshape((-1,))
def predict(self, X):
"""
@param X : an n-by-m array-like object containing n examples with m
features
@return : an array of length n containing real-valued label predictions
(threshold at zero to produce binary predictions)
"""
if self._sv_X is None or len(self._sv_X) == 0:
return np.zeros(len(X))
else:
kernel = kernel_by_name(self.kernel, p=self.p, gamma=self.gamma)
K = kernel(np.asmatrix(X), self._sv_X)
return np.array(self._b + K * spdiag(self._sv_y) * self._sv_alphas).reshape((-1,))
def _setup_svm(self, examples, classes, C):
kernel = kernel_by_name(self.kernel, gamma=self.gamma, p=self.p)
n = len(examples)
e = np.matrix(np.ones((n, 1)))
# Kernel and Hessian
if kernel is None:
K = None
H = None
else:
K = _smart_kernel(kernel, examples)
D = spdiag(classes)
H = D * K * D
# Term for -sum of alphas
f = -e
# Sum(y_i * alpha_i) = 0
A = classes.T.astype(float)
b = np.matrix([0.0])
# 0 <= alpha_i <= C
lb = np.matrix(np.zeros((n, 1)))
if type(C) == float:
ub = C * e
else:
# Allow for C to be an array
ub = C
return K, H, f, A, b, lb, ub
def _smart_kernel(kernel, examples):
"""
Optimize the case when instances are
treated as singleton bags. In such
cases, singleton bags should be placed
at the beginning of the list of examples.
"""
if type(examples) == list:
for i, bag in enumerate(examples):
if len(bag) > 1:
break
singletons, bags = examples[:i], examples[i:]
if singletons and bags:
ss = kernel(singletons, singletons)
sb = kernel(singletons, bags)
bb = kernel(bags, bags)
return np.bmat([[ss, sb], [sb.T, bb]])
return kernel(examples, examples)
| bsd-3-clause |
jakirkham/mpld3 | examples/heart_path.py | 19 | 3958 | """
Patches and Paths
=================
This is a demo adapted from a `matplotlib gallery example
<http://matplotlib.org/examples/shapes_and_collections/path_patch_demo.html>`_
This example adds a custom D3 plugin allowing the user to drag the path
control-points and see the effect on the path.
Use the toolbar buttons at the bottom-right of the plot to enable zooming
and panning, and to reset the view.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import mpld3
from mpld3 import plugins, utils
class LinkedDragPlugin(plugins.PluginBase):
JAVASCRIPT = r"""
mpld3.register_plugin("drag", DragPlugin);
DragPlugin.prototype = Object.create(mpld3.Plugin.prototype);
DragPlugin.prototype.constructor = DragPlugin;
DragPlugin.prototype.requiredProps = ["idpts", "idline", "idpatch"];
DragPlugin.prototype.defaultProps = {}
function DragPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
DragPlugin.prototype.draw = function(){
var patchobj = mpld3.get_element(this.props.idpatch, this.fig);
var ptsobj = mpld3.get_element(this.props.idpts, this.fig);
var lineobj = mpld3.get_element(this.props.idline, this.fig);
var drag = d3.behavior.drag()
.origin(function(d) { return {x:ptsobj.ax.x(d[0]),
y:ptsobj.ax.y(d[1])}; })
.on("dragstart", dragstarted)
.on("drag", dragged)
.on("dragend", dragended);
lineobj.path.attr("d", lineobj.datafunc(ptsobj.offsets));
patchobj.path.attr("d", patchobj.datafunc(ptsobj.offsets,
patchobj.pathcodes));
lineobj.data = ptsobj.offsets;
patchobj.data = ptsobj.offsets;
ptsobj.elements()
.data(ptsobj.offsets)
.style("cursor", "default")
.call(drag);
function dragstarted(d) {
d3.event.sourceEvent.stopPropagation();
d3.select(this).classed("dragging", true);
}
function dragged(d, i) {
d[0] = ptsobj.ax.x.invert(d3.event.x);
d[1] = ptsobj.ax.y.invert(d3.event.y);
d3.select(this)
.attr("transform", "translate(" + [d3.event.x,d3.event.y] + ")");
lineobj.path.attr("d", lineobj.datafunc(ptsobj.offsets));
patchobj.path.attr("d", patchobj.datafunc(ptsobj.offsets,
patchobj.pathcodes));
}
function dragended(d, i) {
d3.select(this).classed("dragging", false);
}
}
mpld3.register_plugin("drag", DragPlugin);
"""
def __init__(self, points, line, patch):
if isinstance(points, mpl.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "drag",
"idpts": utils.get_id(points, suffix),
"idline": utils.get_id(line),
"idpatch": utils.get_id(patch)}
fig, ax = plt.subplots()
Path = mpath.Path
path_data = [
(Path.MOVETO, (1.58, -2.57)),
(Path.CURVE4, (0.35, -1.1)),
(Path.CURVE4, (-1.75, 2.0)),
(Path.CURVE4, (0.375, 2.0)),
(Path.LINETO, (0.85, 1.15)),
(Path.CURVE4, (2.2, 3.2)),
(Path.CURVE4, (3, 0.05)),
(Path.CURVE4, (2.0, -0.5)),
(Path.CLOSEPOLY, (1.58, -2.57)),
]
codes, verts = zip(*path_data)
path = mpath.Path(verts, codes)
patch = mpatches.PathPatch(path, facecolor='r', alpha=0.5)
ax.add_patch(patch)
# plot control points and connecting lines
x, y = zip(*path.vertices[:-1])
points = ax.plot(x, y, 'go', ms=10)
line = ax.plot(x, y, '-k')
ax.grid(True, color='gray', alpha=0.5)
ax.axis('equal')
ax.set_title("Drag Points to Change Path", fontsize=18)
plugins.connect(fig, LinkedDragPlugin(points[0], line[0], patch))
mpld3.show()
| bsd-3-clause |
0todd0000/rft1d | rft1d/examples/random_fields_0.py | 2 | 1440 |
'''
Verbose random field generation.
Note:
When FWHM gets large (2FWHM>nNodes), the data should be padded prior to filtering.
Use **rft1d.random.randn1d** for optional padding.
'''
import numpy as np
from scipy.ndimage import gaussian_filter1d
from matplotlib import pyplot
#(0) Set parameters:
np.random.seed(12345)
nResponses = 5
nNodes = 101
FWHM = 20.0
#(1) Generate Gaussian 1D fields:
y = np.random.randn(nResponses, nNodes)
#convolve with a Gaussian kernel:
sd = FWHM / np.sqrt(8*np.log(2))
y = gaussian_filter1d(y, sd, axis=1, mode='wrap')
#scale to unit variance:
'''
Restore filtered data to unit variance.
This code is modified from "randomtalk.m" by Matthew Brett (Oct 1999)
Downloaded from http://www.fil.ion.ucl.ac.uk/~wpenny/mbi/index.html on 1 Aug 2014
'''
### define Gaussian kernel
t = np.arange( -0.5*(nNodes-1) , 0.5*(nNodes-1)+1 )
gf = np.exp(-(t**2) / (2*sd**2))
gf /= gf.sum()
### expected variance for this kernel
AG = np.fft.fft(gf)
Pag = AG * np.conj(AG) #power of the noise
COV = np.real( np.fft.ifft(Pag) )
svar = COV[0]
scale = np.sqrt(1.0/svar)
### scale the data:
y *= scale
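#(optional sanity check, not in the original script) the node-wise variance of y
#should now be approximately 1 when averaged over many realizations, e.g.:
# print( y.var(axis=0, ddof=1).mean() ) #noisy here because nResponses is only 5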
#(2) Plot:
pyplot.close('all')
pyplot.plot(y.T)
pyplot.plot([0,100], [0,0], 'k:')
pyplot.xlabel('Field position', size=16)
pyplot.ylabel('z', size=20)
pyplot.title('Random (Gaussian) fields', size=20)
pyplot.show()
| gpl-3.0 |
evanbiederstedt/RRBSfun | scripts/methylation_CLL_2.py | 1 | 1933 |
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os
os.chdir('/Users/evanbiederstedt/Downloads/RRBS_data_files')
# set glob subdirectory via cell batch
cll_cells2 = glob.glob("RRBS_NormalB_CLL*")
newdf1 = pd.DataFrame()
for filename in cll_cells2:
df = pd.read_table(filename)
df['filename'] = str(filename)
df = df.drop(['start', 'strand', 'avgWeightedEnt', 'CpGEntropy', 'avgReadCpGs',
'tss', 'genes', 'exons', 'introns', 'promoter', 'cgi', 'geneDensity', 'ctcfUpstream', 'ctcfDownstream',
'ctcfDensity', 'geneDistalRegulatoryModules', 'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance',
'3PrimeUTRDistance', '5PrimeUTR', '5PrimeUTRDistance', 'firstExon',
'geneDistalRegulatoryModulesK562', 'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64','hypoInHues64Distance',
'tssDistance', 'genesDistance', 'exonsDistance', 'intronsDistance', 'promoterDistance', 'cgiDistance',
'ctcf', 'ctcfDistance', 'geneDistalRegulatoryModulesDistance', 'vistaEnhancersDistance', 'firstExonDistance'], axis=1)
chromosomes = ['chr2', 'chr5', 'chr11']
df = df[(df['chr'].isin(chromosomes))]
df["total_reads"] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
df = df.sum()
# Methylation == # methylated reads per CpG site / total # of reads per CpG site
df["percent_methylation"] = df["methReadCount"]/df["total_reads"]
newdf1 = newdf1.append(df, ignore_index=True)
newdf1 = newdf1.drop(['chr'], axis=1)
newdf1 = newdf1[['filename', 'percent_methylation', 'thisMeth', 'thisUnmeth', 'methReadCount', 'unmethReadCount', 'mixedReadCount', 'total_reads']]
# export as .csv
newdf1.to_csv('CLL_cells_methylation_chr2_5_11B.csv')
| mit |
GuessWhoSamFoo/pandas | pandas/tests/scalar/test_nat.py | 1 | 10666 | from datetime import datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT
import pandas.compat as compat
from pandas import (
DatetimeIndex, Index, NaT, Period, Series, Timedelta, TimedeltaIndex,
Timestamp)
from pandas.core.arrays import PeriodArray
from pandas.util import testing as tm
@pytest.mark.parametrize("nat,idx", [(Timestamp("NaT"), DatetimeIndex),
(Timedelta("NaT"), TimedeltaIndex),
(Period("NaT", freq="M"), PeriodArray)])
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
ser = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"),
NaT, "NaT", "nat"])
def test_identity(klass, value):
assert klass(value) is NaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan])
def test_equality(klass, value):
if klass is Period and value == "":
pytest.skip("Period cannot parse empty string")
assert klass(value).value == iNaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta])
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
def test_round_nat(klass, method, freq):
# see gh-14940
ts = klass("nat")
round_method = getattr(ts, method)
assert round_method(freq) is ts
@pytest.mark.parametrize("method", [
"astimezone", "combine", "ctime", "dst", "fromordinal",
"fromtimestamp", "isocalendar", "strftime", "strptime",
"time", "timestamp", "timetuple", "timetz", "toordinal",
"tzname", "utcfromtimestamp", "utcnow", "utcoffset",
"utctimetuple", "timestamp"
])
def test_nat_methods_raise(method):
# see gh-9513, gh-17329
msg = "NaTType does not support {method}".format(method=method)
with pytest.raises(ValueError, match=msg):
getattr(NaT, method)()
@pytest.mark.parametrize("method", [
"weekday", "isoweekday"
])
def test_nat_methods_nan(method):
# see gh-9513, gh-17329
assert np.isnan(getattr(NaT, method)())
@pytest.mark.parametrize("method", [
"date", "now", "replace", "today",
"tz_convert", "tz_localize"
])
def test_nat_methods_nat(method):
# see gh-8254, gh-9513, gh-17329
assert getattr(NaT, method)() is NaT
@pytest.mark.parametrize("get_nat", [
lambda x: NaT,
lambda x: Timedelta(x),
lambda x: Timestamp(x)
])
def test_nat_iso_format(get_nat):
# see gh-12300
assert get_nat("NaT").isoformat() == "NaT"
@pytest.mark.parametrize("klass,expected", [
(Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]),
(Timedelta, ["components", "delta", "is_populated", "to_pytimedelta",
"to_timedelta64", "view"])
])
def test_missing_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# Here, we check which public methods NaT does not have. We
# ignore any missing private methods.
nat_names = dir(NaT)
klass_names = dir(klass)
missing = [x for x in klass_names if x not in nat_names and
not x.startswith("_")]
missing.sort()
assert missing == expected
def _get_overlap_public_nat_methods(klass, as_tuple=False):
"""
Get overlapping public methods between NaT and another class.
Parameters
----------
klass : type
The class to compare with NaT
as_tuple : bool, default False
Whether to return a list of tuples of the form (klass, method).
Returns
-------
overlap : list
"""
nat_names = dir(NaT)
klass_names = dir(klass)
overlap = [x for x in nat_names if x in klass_names and
not x.startswith("_") and
callable(getattr(klass, x))]
# Timestamp takes precedence over Timedelta in terms of overlap.
if klass is Timedelta:
ts_names = dir(Timestamp)
overlap = [x for x in overlap if x not in ts_names]
if as_tuple:
overlap = [(klass, method) for method in overlap]
overlap.sort()
return overlap
@pytest.mark.parametrize("klass,expected", [
(Timestamp, ["astimezone", "ceil", "combine", "ctime", "date", "day_name",
"dst", "floor", "fromisoformat", "fromordinal",
"fromtimestamp", "isocalendar", "isoformat", "isoweekday",
"month_name", "now", "replace", "round", "strftime",
"strptime", "time", "timestamp", "timetuple", "timetz",
"to_datetime64", "to_pydatetime", "today", "toordinal",
"tz_convert", "tz_localize", "tzname", "utcfromtimestamp",
"utcnow", "utcoffset", "utctimetuple", "weekday"]),
(Timedelta, ["total_seconds"])
])
def test_overlap_public_nat_methods(klass, expected):
# see gh-17327
#
# NaT should have *most* of the Timestamp and Timedelta methods.
# In case when Timestamp, Timedelta, and NaT are overlap, the overlap
# is considered to be with Timestamp and NaT, not Timedelta.
# "fromisoformat" was introduced in 3.7
if klass is Timestamp and not compat.PY37:
expected.remove("fromisoformat")
assert _get_overlap_public_nat_methods(klass) == expected
@pytest.mark.parametrize("compare", (
_get_overlap_public_nat_methods(Timestamp, True) +
_get_overlap_public_nat_methods(Timedelta, True))
)
def test_nat_doc_strings(compare):
# see gh-17327
#
# The docstrings for overlapping methods should match.
klass, method = compare
klass_doc = getattr(klass, method).__doc__
nat_doc = getattr(NaT, method).__doc__
assert klass_doc == nat_doc
_ops = {
"left_plus_right": lambda a, b: a + b,
"right_plus_left": lambda a, b: b + a,
"left_minus_right": lambda a, b: a - b,
"right_minus_left": lambda a, b: b - a,
"left_times_right": lambda a, b: a * b,
"right_times_left": lambda a, b: b * a,
"left_div_right": lambda a, b: a / b,
"right_div_left": lambda a, b: b / a,
}
@pytest.mark.parametrize("op_name", list(_ops.keys()))
@pytest.mark.parametrize("value,val_type", [
(2, "scalar"),
(1.5, "scalar"),
(np.nan, "scalar"),
(timedelta(3600), "timedelta"),
(Timedelta("5s"), "timedelta"),
(datetime(2014, 1, 1), "timestamp"),
(Timestamp("2014-01-01"), "timestamp"),
(Timestamp("2014-01-01", tz="UTC"), "timestamp"),
(Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"),
(pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"),
])
def test_nat_arithmetic_scalar(op_name, value, val_type):
# see gh-6873
invalid_ops = {
"scalar": {"right_div_left"},
"timedelta": {"left_times_right", "right_times_left"},
"timestamp": {"left_times_right", "right_times_left",
"left_div_right", "right_div_left"}
}
op = _ops[op_name]
if op_name in invalid_ops.get(val_type, set()):
if (val_type == "timedelta" and "times" in op_name and
isinstance(value, Timedelta)):
msg = "Cannot multiply"
else:
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
op(NaT, value)
else:
if val_type == "timedelta" and "div" in op_name:
expected = np.nan
else:
expected = NaT
assert op(NaT, value) is expected
@pytest.mark.parametrize("val,expected", [
(np.nan, NaT),
(NaT, np.nan),
(np.timedelta64("NaT"), np.nan)
])
def test_nat_rfloordiv_timedelta(val, expected):
# see gh-#18846
#
# See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
td = Timedelta(hours=3, minutes=4)
assert td // val is expected
@pytest.mark.parametrize("op_name", [
"left_plus_right", "right_plus_left",
"left_minus_right", "right_minus_left"
])
@pytest.mark.parametrize("value", [
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
TimedeltaIndex(["1 day", "2 day"], name="x"),
])
def test_nat_arithmetic_index(op_name, value):
# see gh-11718
exp_name = "x"
exp_data = [NaT] * 2
if isinstance(value, DatetimeIndex) and "plus" in op_name:
expected = DatetimeIndex(exp_data, name=exp_name, tz=value.tz)
else:
expected = TimedeltaIndex(exp_data, name=exp_name)
tm.assert_index_equal(_ops[op_name](NaT, value), expected)
@pytest.mark.parametrize("op_name", [
"left_plus_right", "right_plus_left",
"left_minus_right", "right_minus_left"
])
@pytest.mark.parametrize("box", [TimedeltaIndex, Series])
def test_nat_arithmetic_td64_vector(op_name, box):
# see gh-19124
vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
box_nat = box([NaT, NaT], dtype="timedelta64[ns]")
tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
def test_nat_pinned_docstrings():
# see gh-17327
assert NaT.ctime.__doc__ == datetime.ctime.__doc__
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
googlearchive/rgc-models | response_model/python/population_subunits/coarse/fitting/few_cells_tf_refractoring.py | 1 | 28010 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Fit subunits for multiple cells simultaneously.
This script has the extensions of single cell models from earlier
as well as new population subunit models -
most notably the almost convolutional model - where each subunit is
summation of mother subunit and subunit specific modification.
"""
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import scipy.io as sio
from scipy import ndimage
import random
FLAGS = flags.FLAGS
# flags for data location
flags.DEFINE_string('folder_name', 'experiment4',
'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
# flags for stochastic learning and loading data
# data is split and stored as small .mat files
flags.DEFINE_integer('batchsz', 1000, 'batch size for training')
flags.DEFINE_integer('n_chunks', 216, 'number of data chunks') # should be 216
flags.DEFINE_integer('n_b_in_c', 1, 'number of batches in one chunk of data')
flags.DEFINE_integer('train_len', 216 - 21, 'how much training length to use?')
flags.DEFINE_float('step_sz', 10, 'step size for learning algorithm')
# random number generators initialized
# removes unnecessary data variability when comparing algorithms
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
# flags for model/loss specification
flags.DEFINE_string('model_id', 'relu', 'which model to fit')
flags.DEFINE_string('loss', 'poisson', 'which loss to use?')
flags.DEFINE_string('masked_stimulus', 'False',
'use all pixels or only those inside RF of selected cells?')
flags.DEFINE_string('all_cells', 'True',
'learn model for all cells or a few chosen ones?')
# model specific terms
# subunit grid spacing
flags.DEFINE_float('su_grid_spacing', 3, 'grid spacing')
# useful for models which take a specific number of subunits as input
flags.DEFINE_integer('ratio_SU', 2, 'ratio of subunits/cells')
# useful for convolution-like models
flags.DEFINE_integer('window', 3,
'size of window for each subunit in relu_window model')
flags.DEFINE_integer('stride', 3, 'stride for relu_window')
# some models need regularization of parameters
flags.DEFINE_float('lam_w', 0.0001, 'sparsity regularization of w')
flags.DEFINE_float('lam_a', 0.0001, 'sparsity regularization of a')
FLAGS = flags.FLAGS
# global stimulus variables
stim_train_part = np.array([])
resp_train_part = np.array([])
chunk_order = np.array([])
cells_choose = np.array([])
chosen_mask = np.array([])
def get_test_data():
# the last chunk of data is test data
test_data_chunks = [FLAGS.n_chunks]
for ichunk in test_data_chunks:
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_part = data['maskedMovdd_part'].T
resp_part = data['Y_part'].T
test_len = stim_part.shape[0]
stim_part = stim_part[:, chosen_mask]
resp_part = resp_part[:, cells_choose]
return stim_part, resp_part, test_len
def get_next_training_batch(iteration):
# Returns a new batch of training data : stimulus and response arrays
# we will use global stimulus and response variables to permute training data
# chunks and store where we are in list of training data
# each chunk might have multiple training batches.
# So go through all batches in a 'chunk' before moving on to the next chunk
global stim_train_part
global resp_train_part
global chunk_order
togo = True
while togo:
if(iteration % FLAGS.n_b_in_c == 0):
# iteration is multiple of number of batches in a chunk means
# finished going through a chunk, load new chunk of data
ichunk = (iteration / FLAGS.n_b_in_c) % (FLAGS.train_len-1 ) # -1 as last one chunk used for testing
if (ichunk == 0):
# if starting over the chunks again, shuffle the chunks
        chunk_order = np.random.permutation(np.arange(FLAGS.train_len)) # remove first chunk - weird?
      if chunk_order[ichunk] + 1 != 1: # 1st chunk was weird for the dataset used
filename = FLAGS.data_location + 'Off_par_data_' + str(chunk_order[ichunk] + 1) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = data['maskedMovdd_part'] # stimulus
resp_train_part = data['Y_part'] # response
ichunk = chunk_order[ichunk] + 1
while stim_train_part.shape[1] < FLAGS.batchsz:
# if the current loaded data is smaller than batch size, load more chunks
if (ichunk > FLAGS.n_chunks):
ichunk = 2
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = np.append(stim_train_part, data['maskedMovdd_part'],
axis=1)
resp_train_part = np.append(resp_train_part, data['Y_part'], axis=1)
ichunk = ichunk + 1
ibatch = iteration % FLAGS.n_b_in_c # which section of current chunk to use
try:
stim_train = np.array(stim_train_part[:,ibatch: ibatch + FLAGS.batchsz],
dtype='float32').T
resp_train = np.array(resp_train_part[:,ibatch: ibatch + FLAGS.batchsz],
dtype='float32').T
togo=False
except:
iteration = np.random.randint(1,100000)
print('Load exception iteration: ' + str(iteration) +
'chunk: ' + str(chunk_order[ichunk]) + 'batch: ' + str(ibatch) )
togo=True
stim_train = stim_train[:, chosen_mask]
resp_train = resp_train[:, cells_choose]
return stim_train, resp_train, FLAGS.batchsz
def get_windows():
# use FLAGS to get convolutional 'windows' for convolutional models.
window = FLAGS.window # 2*window +1 is the width and height of windows
n_pix = (2* window + 1) ** 2 # number of pixels in the window
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
# make mask_tf: weight (dimx X dimy X npix) for convolutional layer,
# where each layer is 1 for a particular pixel in window and 0 for others.
# This is used for flattening the pixels in a window,
# so that different weights could be applied to each window
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# number of windows in x and y dimensions
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
return mask_tf, dimx, dimy, n_pix
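# Quick sanity check of the window arithmetic above, assuming the default
# FLAGS.window=3, FLAGS.stride=3 and the 40x80 stimulus used below:
# each window covers (2*3+1)**2 = 49 pixels,
# dimx = floor(1 + (40-7)/3) = 12 and dimy = floor(1 + (80-7)/3) = 25,
# i.e. 12*25 = 300 subunit windows in total.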
def main(argv):
# global variables will be used for getting training data
global cells_choose
global chosen_mask
global chunk_order
  # set random seeds: when the same algorithm is run with different FLAGS,
  # the sequence of random data is the same.
np.random.seed(FLAGS.np_randseed)
random.seed(FLAGS.randseed)
  # initial chunk order (will be re-shuffled every time we go over a chunk)
chunk_order = np.random.permutation(np.arange(FLAGS.n_chunks-1))
# Load data summary
data_filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(data_filename, 'r')
data_summary = sio.loadmat(summary_file)
cells = np.squeeze(data_summary['cells'])
# which cells to train subunits for
if FLAGS.all_cells == 'True':
cells_choose = np.array(np.ones(np.shape(cells)), dtype='bool')
else:
cells_choose = (cells ==3287) | (cells ==3318 ) | (cells ==3155) | (cells ==3066)
n_cells = np.sum(cells_choose) # number of cells
# load spikes and relevant stimulus pixels for chosen cells
tot_spks = np.squeeze(data_summary['tot_spks'])
tot_spks_chosen_cells = np.array(tot_spks[cells_choose] ,dtype='float32')
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
# chosen_mask = which pixels to learn subunits over
if FLAGS.masked_stimulus == 'True':
chosen_mask = np.array(np.sum(total_mask[cells_choose,:],0)>0, dtype='bool')
else:
chosen_mask = np.array(np.ones(3200).astype('bool'))
stim_dim = np.sum(chosen_mask) # stimulus dimensions
print('\ndataset summary loaded')
# print parameters
print('Save folder name: ' + str(FLAGS.folder_name) +
'\nmodel:' + str(FLAGS.model_id) +
'\nLoss:' + str(FLAGS.loss) +
'\nmasked stimulus:' + str(FLAGS.masked_stimulus) +
'\nall_cells?' + str(FLAGS.all_cells) +
'\nbatch size' + str(FLAGS.batchsz) +
'\nstep size' + str(FLAGS.step_sz) +
'\ntraining length: ' + str(FLAGS.train_len) +
'\nn_cells: '+str(n_cells))
# decide the number of subunits to fit
n_su = FLAGS.ratio_SU*n_cells
# filename for saving file
short_filename = ('_masked_stim=' + str(FLAGS.masked_stimulus) + '_all_cells='+
str(FLAGS.all_cells) + '_loss='+
str(FLAGS.loss) + '_batch_sz='+ str(FLAGS.batchsz) +
'_step_sz'+ str(FLAGS.step_sz) +
'_tlen=' + str(FLAGS.train_len) + '_bg')
with tf.Session() as sess:
    # set up stimulus and response placeholders
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
resp = tf.placeholder(tf.float32, name='resp')
data_len = tf.placeholder(tf.float32, name='data_len')
if FLAGS.loss == 'poisson':
b_init = np.array(0.000001*np.ones(n_cells)) # a very small positive bias needed to avoid log(0) in poisson loss
else:
b_init = np.log((tot_spks_chosen_cells)/(216000. - tot_spks_chosen_cells)) # log-odds, a good initialization for some losses (like logistic)
# different firing rate models
if FLAGS.model_id == 'exp_additive':
# This model was implemented for earlier work.
# firing rate for cell c: lam_c = sum_s exp(w_s.x + a_sc)
# filename
short_filename = ('model=' + str(FLAGS.model_id) + short_filename)
# variables
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su),
dtype='float32'), name='w')
a = tf.Variable(np.array(0.01 * np.random.rand(n_cells, 1, n_su),
dtype='float32'), name='a')
# firing rate model
lam = tf.transpose(tf.reduce_sum(tf.exp(tf.matmul(stim, w) + a), 2))
regularization = 0
vars_fit = [w, a]
      def proj(): # called after every training step to project parameters onto model constraints
pass
if FLAGS.model_id == 'relu':
# firing rate for cell c: lam_c = a_c'.relu(w.x) + b
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) + '_lam_a=' +
str(FLAGS.lam_a) + '_nsu=' + str(n_su) + short_filename)
# variables
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su),
dtype='float32'), name='w')
a = tf.Variable(np.array(0.01 * np.random.rand(n_su, n_cells),
dtype='float32'), name='a')
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
# firing rate model
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + b
vars_fit = [w, a] # which variables are learnt
if not FLAGS.loss == 'poisson': # don't learn b for poisson loss
vars_fit = vars_fit + [b]
# regularization of parameters
regularization = (FLAGS.lam_w * tf.reduce_sum(tf.abs(w)) +
FLAGS.lam_a * tf.reduce_sum(tf.abs(a)))
# projection to satisfy constraints
a_pos = tf.assign(a, (a + tf.abs(a))/2)
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
sess.run(a_pos)
if FLAGS.loss == 'poisson':
sess.run(b_pos)
if FLAGS.model_id == 'relu_window':
# firing rate for cell c: lam_c = a_c'.relu(w.x) + b,
      # where the w_i are defined over small windows that are convolutionally related to each other.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename )
mask_tf, dimx, dimy, n_pix = get_windows() # get convolutional windows
# variables
w = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'), name='w')
a = tf.Variable(np.array(np.random.rand(dimx*dimy, n_cells),dtype='float32'), name='a')
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w, a] # which variables are learnt
if not FLAGS.loss == 'poisson': # don't learn b for poisson loss
vars_fit = vars_fit + [b]
# stimulus filtered with convolutional windows
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_wts = tf.nn.relu(tf.reduce_sum(tf.mul(stim_masked, w), 3))
# get firing rate
lam = tf.matmul(tf.reshape(stim_wts, [-1,dimx*dimy]),a) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w))
# projection to satisfy hard variable constraints
a_pos = tf.assign(a, (a + tf.abs(a))/2)
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
sess.run(a_pos)
if FLAGS.loss == 'poisson':
sess.run(b_pos)
if FLAGS.model_id == 'relu_window_mother':
# firing rate for cell c: lam_c = a_c'.relu(w.x) + b,
      # where the w_i are defined over small windows that are convolutionally related to each other.
      # w_i = w_mother + w_del_i,
      # where w_mother is common across all 'windows' and w_del is different for different windows.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename )
mask_tf, dimx, dimy, n_pix = get_windows()
# variables
w_del = tf.Variable(np.array( 0.05*np.random.randn(dimx, dimy, n_pix),
dtype='float32'), name='w_del')
w_mother = tf.Variable(np.array( np.ones((2 * FLAGS.window + 1,
2 * FLAGS.window + 1, 1, 1)),
dtype='float32'), name='w_mother')
a = tf.Variable(np.array(np.random.rand(dimx*dimy, n_cells),
dtype='float32'), name='a')
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w_mother, w_del, a] # which variables to learn
if not FLAGS.loss == 'poisson':
vars_fit = vars_fit + [b]
# stimulus filtered with convolutional windows
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D,
w_mother,
strides=[1, FLAGS.stride,
FLAGS.stride, 1],
padding="VALID"),3)
stim_masked = tf.nn.conv2d(stim4D,
mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
      # activation of different subunits
su_act = tf.nn.relu(stim_del + stim_convolved)
# get firing rate
lam = tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]),a) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
# projection to satisfy hard variable constraints
a_pos = tf.assign(a, (a + tf.abs(a))/2)
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
sess.run(a_pos)
if FLAGS.loss == 'poisson':
sess.run(b_pos)
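    # Note on the 'relu_window_mother' decomposition above: stim_convolved
    # applies the shared w_mother to every window, while stim_del applies the
    # window-specific w_del_i to the same flattened window pixels, so for
    # window i
    #   su_act_i = relu(x_i . (w_mother + w_del_i)),
    # which is equivalent to giving every subunit its own filter while
    # sharing most parameters through w_mother.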
if FLAGS.model_id == 'relu_window_mother_sfm':
# firing rate for cell c: lam_c = a_sfm_c'.relu(w.x) + b,
# a_sfm_c = softmax(a) : so a cell cannot be connected to all subunits equally well.
      # where the w_i are defined over small windows that are convolutionally related to each other.
      # w_i = w_mother + w_del_i,
      # where w_mother is common across all 'windows' and w_del is different for different windows.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename)
mask_tf, dimx, dimy, n_pix = get_windows()
# variables
w_del = tf.Variable(np.array( 0.05*np.random.randn(dimx, dimy, n_pix),
dtype='float32'), name='w_del')
w_mother = tf.Variable(np.array( np.ones((2 * FLAGS.window + 1,
2 * FLAGS.window + 1, 1, 1)),
dtype='float32'), name='w_mother')
a = tf.Variable(np.array(np.random.randn(dimx*dimy, n_cells),
dtype='float32'), name='a')
a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a)))
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w_mother, w_del, a] # which variables to fit
if not FLAGS.loss == 'poisson':
vars_fit = vars_fit + [b]
# stimulus filtered with convolutional windows
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_convolved = tf.reduce_sum(tf.nn.conv2d(stim4D,
w_mother,
strides=[1, FLAGS.stride,
FLAGS.stride, 1],
padding="VALID"),3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
      # activation of different subunits
su_act = tf.nn.relu(stim_del + stim_convolved)
# get firing rate
lam = tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]), a_sfm) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
# projection to satisfy hard variable constraints
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
if FLAGS.loss == 'poisson':
sess.run(b_pos)
if FLAGS.model_id == 'relu_window_mother_sfm_exp':
# firing rate for cell c: lam_c = exp(a_sfm_c'.relu(w.x)) + b,
# a_sfm_c = softmax(a) : so a cell cannot be connected to all subunits equally well.
# exponential output NL would cancel the log() in poisson and might get better estimation properties.
      # where the w_i are defined over small windows that are convolutionally related to each other.
      # w_i = w_mother + w_del_i,
      # where w_mother is common across all 'windows' and w_del is different for different windows.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename)
# get windows
mask_tf, dimx, dimy, n_pix = get_windows()
# declare variables
w_del = tf.Variable(np.array( 0.05*np.random.randn(dimx, dimy, n_pix),
dtype='float32'), name='w_del')
w_mother = tf.Variable(np.array( np.ones((2 * FLAGS.window + 1,
2 * FLAGS.window + 1, 1, 1)),
dtype='float32'), name='w_mother')
a = tf.Variable(np.array(np.random.randn(dimx*dimy, n_cells),
dtype='float32'), name='a')
a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a)))
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w_mother, w_del, a]
if not FLAGS.loss == 'poisson':
vars_fit = vars_fit + [b]
# filter stimulus
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D,
w_mother,
strides=[1, FLAGS.stride,
FLAGS.stride, 1],
padding="VALID"),3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
# get subunit activation
su_act = tf.nn.relu(stim_del + stim_convolved)
# get cell firing rates
lam = tf.exp(tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]), a_sfm)) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
# projection to satisfy hard variable constraints
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
if FLAGS.loss == 'poisson':
sess.run(b_pos)
# different loss functions
if FLAGS.loss == 'poisson':
loss_inter = (tf.reduce_sum(lam)/120. -
tf.reduce_sum(resp*tf.log(lam))) / data_len
if FLAGS.loss == 'logistic':
loss_inter = tf.reduce_sum(tf.nn.softplus(-2 * (resp - 0.5)*lam)) / data_len
if FLAGS.loss == 'hinge':
loss_inter = tf.reduce_sum(tf.nn.relu(1 -2 * (resp - 0.5)*lam)) / data_len
loss = loss_inter + regularization # add regularization to get final loss function
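    # For the poisson case, loss_inter is the negative log-likelihood up to
    # constants: sum(lam)*dt - sum(resp*log(lam)), normalized by data_len,
    # where the 1/120 factor presumably corresponds to a 120 Hz bin width.
    # The logistic and hinge variants treat lam as a margin on responses
    # recoded to +/-1 via 2*(resp - 0.5).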
# training consists of calling training()
# which performs a train step and
    # projects parameters onto model-specific constraints using proj()
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,
var_list=
vars_fit)
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict) # one step of gradient descent
proj() # model specific projection operations
# evaluate loss on given data.
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
# saving details
# make a folder with name derived from parameters of the algorithm
# - it saves checkpoint files and summaries used in tensorboard
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
# make folder if it does not exist
if not gfile.IsDirectory(parent_folder):
gfile.MkDir(parent_folder)
FLAGS.save_location = parent_folder + short_filename + '/'
if not gfile.IsDirectory(FLAGS.save_location):
gfile.MkDir(FLAGS.save_location)
save_filename = FLAGS.save_location + short_filename
# create summary writers
# create histogram summary for all parameters which are learnt
for ivar in vars_fit:
tf.histogram_summary(ivar.name, ivar)
# loss summary
l_summary = tf.scalar_summary('loss',loss)
# loss without regularization summary
l_inter_summary = tf.scalar_summary('loss_inter',loss_inter)
# Merge all the summary writer ops into one op (this way,
# calling one op stores all summaries)
merged = tf.merge_all_summaries()
# training and testing has separate summary writers
train_writer = tf.train.SummaryWriter(FLAGS.save_location + 'train',
sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.save_location + 'test')
## Fitting procedure
print('Start fitting')
sess.run(tf.initialize_all_variables())
saver_var = tf.train.Saver(tf.all_variables(),
keep_checkpoint_every_n_hours=0.05)
load_prev = False
start_iter=0
try:
# restore previous fits if they are available
# - useful when programs are preempted frequently on .
latest_filename = short_filename + '_latest_fn'
restore_file = tf.train.latest_checkpoint(FLAGS.save_location,
latest_filename)
# restore previous iteration count and start from there.
start_iter = int(restore_file.split('/')[-1].split('-')[-1])
saver_var.restore(sess, restore_file) # restore variables
load_prev = True
except:
print('No previous dataset')
if load_prev:
print('Previous results loaded')
else:
print('Variables initialized')
# Finally, do fitting
icnt = 0
# get test data and make test dictionary
stim_test,resp_test,test_length = get_test_data()
fd_test = {stim: stim_test,
resp: resp_test,
data_len: test_length}
for istep in np.arange(start_iter,400000):
print(istep)
      # get training data and make training dictionary
stim_train, resp_train, train_len = get_next_training_batch(istep)
fd_train = {stim: stim_train,
resp: resp_train,
data_len: train_len}
# take training step
training(fd_train)
if istep%10 == 0:
# compute training and testing losses
ls_train = get_loss(fd_train)
ls_test = get_loss(fd_test)
latest_filename = short_filename + '_latest_fn'
saver_var.save(sess, save_filename, global_step=istep,
latest_filename = latest_filename)
# add training summary
summary = sess.run(merged, feed_dict=fd_train)
train_writer.add_summary(summary,istep)
# add testing summary
summary = sess.run(merged, feed_dict=fd_test)
test_writer.add_summary(summary,istep)
print(istep, ls_train, ls_test)
icnt += FLAGS.batchsz
if icnt > 216000-1000:
icnt = 0
tms = np.random.permutation(np.arange(216000-1000))
if __name__ == '__main__':
app.run()
| apache-2.0 |
mhue/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 84 | 14181 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorers work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
mattgiguere/pyutil | tests/test_wgtdMean.py | 1 | 5929 | #!/usr/bin/env python
"""
Created on 2015-03-19T16:19:41
"""
from __future__ import division, print_function
import sys
from pyutil.pyutil import wgtdMean as wm
try:
import numpy as np
except ImportError:
print('You need numpy installed')
sys.exit(1)
try:
import pandas as pd
except ImportError:
print('You need pandas installed')
sys.exit(1)
__author__ = "Matt Giguere (github: @mattgiguere)"
__maintainer__ = "Matt Giguere"
__email__ = "[email protected]"
__status__ = " Development NOT(Prototype or Production)"
__version__ = '0.0.1'
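# The tests below exercise three functions of pyutil.wgtdMean; the expected
# API is inferred from the calls in this file (not restated from its docs):
# - wm.getNewTimes(times, timebin, phase=...): bin-center times covering the
#   input time span at the requested bin width,
# - wm.getNewVals(newtimes, times, rvs, uncs, timebin=...): weighted-mean RVs
#   and propagated uncertainties for each bin,
# - wm.wgtdMeans(df, timebin=..., returnNan=...): DataFrame wrapper expecting
#   JD/mnvel/errvel columns.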
def test_getNewTimes_one_day_bin_ten_days():
"""
Test to make sure `getNewTimes` returns ten elements
when 75 elements are entered over a ten day span with
one day binning in place.
"""
times = np.random.uniform(0, 10, 75)
newtimes = wm.getNewTimes(times, 1.)
print(len(newtimes))
assert len(newtimes) == 10
def test_getNewTimes_with_half_phase_one_day_bin():
"""
Test to make sure times are shifted
properly if the phase optional argument
is used.
"""
times = np.random.uniform(0, 10, 75)
newtimes = wm.getNewTimes(times, 1.)
newtimes2 = wm.getNewTimes(times, 1., phase=0.5)
assert np.round((np.min(newtimes2) - np.min(newtimes)), 7) == 0.5
def test_getNewTimes_with_half_phase_two_day_bin():
"""
Test to make sure times are shifted
properly if the phase optional argument
is used with a two day bin.
"""
times = np.random.uniform(0, 10, 75)
newtimes = wm.getNewTimes(times, 2.)
newtimes2 = wm.getNewTimes(times, 2., phase=0.5)
assert np.round((np.min(newtimes2) - np.min(newtimes)), 7) == 1.000
def test_getNewVals_for_newrvs_dim():
"""
The output dimension of newRVs should be the same as the
input dimension of newtimes
"""
newtimes = np.arange(10)
times = np.random.uniform(0, 10, 100)
rvs = np.random.uniform(-5, 5, 100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)
assert len(newtimes) == len(newRVs)
def test_getNewVals_for_newuncs_dim():
"""
The output dimension of newRVs should be the same as the
input dimension of newtimes
"""
newtimes = np.arange(10)
times = np.random.uniform(0, 10, 100)
rvs = np.random.uniform(-5, 5, 100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)
assert len(newtimes) == len(newUncs)
def test_getNewVals_rv_scatter():
"""
    The RV scatter (the standard deviation of normally distributed points
    about the mean) should be reduced when binning observations down. This
    routine checks that.
"""
newtimes = np.arange(10)
times = np.random.uniform(0, 10, 100)
rvs = np.random.normal(loc=0, scale=5, size=100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)
assert np.std(newRVs) < np.std(rvs)
def test_getNewVals_unc_magnitude():
"""
    The median single-measurement precision should be lower
    for the binned data. This function ensures that this is the case.
"""
newtimes = np.arange(10)
times = np.random.uniform(0, 10, 100)
rvs = np.random.normal(loc=0, scale=5, size=100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs, uncs, timebin=1.)
assert np.median(newUncs) < np.median(uncs)
def test_wgtdMeans_number_of_columns():
"""
Test the number of returned columns
"""
dfi = pd.DataFrame()
dfi["JD"] = np.random.uniform(0, 10, 100)
dfi["mnvel"] = np.random.normal(loc=0, scale=5, size=100)
dfi["errvel"] = np.random.normal(loc=1., scale=0.5, size=100)
dfo = wm.wgtdMeans(dfi, timebin=1.0)
assert len(dfo.columns) == 3
def test_wgtdMeans_number_of_rows():
"""
Test the number of returned rows
"""
dfi = pd.DataFrame()
dfi["JD"] = np.random.uniform(0, 10, 100)
dfi["mnvel"] = np.random.normal(loc=0, scale=5, size=100)
dfi["errvel"] = np.random.normal(loc=1., scale=0.5, size=100)
dfo = wm.wgtdMeans(dfi, timebin=1.0)
assert len(dfo) >= 9
def test_big_gaps_getNewVals():
"""
Ensure getNewVals routine can handle big gaps in times
"""
timebin = 1.
times = np.concatenate((np.random.uniform(0, 10, 50),
np.random.uniform(30, 40, 50)))
newtimes = wm.getNewTimes(times, timebin)
rvs = np.random.normal(loc=0, scale=5, size=100)
uncs = np.random.normal(loc=1., scale=0.5, size=100)
newRVs, newUncs = wm.getNewVals(newtimes, times, rvs,
uncs, timebin=timebin)
fins = np.where(np.isfinite(newUncs))
newRVs = newRVs[fins]
newUncs = newUncs[fins]
newtimes = newtimes[fins]
assert np.median(newUncs) < np.median(uncs)
def test_big_gaps_main_routine():
"""
Test to make sure the code handles big gaps well
"""
dfi = pd.DataFrame()
dfi["JD"] = np.concatenate((np.random.uniform(0, 10, 50),
np.random.uniform(40, 50, 50)))
dfi["mnvel"] = np.random.normal(loc=0, scale=5, size=100)
dfi["errvel"] = np.random.normal(loc=1., scale=0.5, size=100)
dfo = wm.wgtdMeans(dfi, timebin=1.0)
assert len(dfo) >= 9
def test_returnNan_option():
"""
Make sure the returnNan removes NaN elements.
"""
dfi = pd.DataFrame()
dfi["JD"] = np.concatenate((np.random.uniform(0, 10, 50),
np.random.uniform(40, 50, 50)))
dfi["mnvel"] = np.random.normal(loc=0, scale=5, size=100)
dfi["errvel"] = np.random.normal(loc=1., scale=0.5, size=100)
dfo = wm.wgtdMeans(dfi, timebin=1.0, returnNan=False)
assert len(dfo) == len(dfo[np.isfinite(dfo["mnvel"])])
| mit |
jorge2703/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
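# With this construction, the first two columns of X and Y are driven by l1
# and the last two by l2, so the correlation matrices printed below should
# show two clear 2x2 blocks of strong within-block correlation.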
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
gakarak/BTBDB_ImageAnalysisSubPortal | app/core/segmct/__init__.py | 1 | 20308 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import glob
import numpy as np
import nibabel as nib
from app.core.segmct.fcnn_lung2d import BatcherCTLung2D
from app.core.segmct.fcnn_lesion3d import BatcherCTLesion3D
from app.core.segmct import fcnn_lesion3dv3 as fcn3dv3
# import app.core.segmct.
from app.core.segmct.fcnn_lesion3dv2 import Inferencer as InferencerLesion3Dv2, lesion_id2name, lesion_id2rgb, lesion_name2id
import json
import skimage.io as skio
import app.core.preprocessing as preproc
from app.core.preprocessing import resizeNii, resize3D
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import app.core.lesion_descriptors as ldsc
#########################################
def segmentLungs25D(pathInpNii, dirWithModel, pathOutNii=None, outSize=None, batchSize=8, isDebug=False, threshold=None):
if isinstance(pathInpNii,str):# or isinstance(pathInpNii,unicode):
isInpFromFile = True
if not os.path.isfile(pathInpNii):
raise Exception('Cant find input file [%s]' % pathInpNii)
else:
isInpFromFile = False
if not os.path.isdir(dirWithModel):
raise Exception('Cant find directory with model [%s]' % dirWithModel)
if pathOutNii is not None:
outDir = os.path.dirname(os.path.abspath(pathOutNii))
if not os.path.isdir(outDir):
raise Exception('Cant find output directory [%s], create directory for output file before this call' % outDir)
batcherInfer = BatcherCTLung2D()
batcherInfer.loadModelForInference(pathModelJson=dirWithModel, pathMeanData=dirWithModel)
if isDebug:
batcherInfer.model.summary()
lstPathNifti = [ pathInpNii ]
ret = batcherInfer.inference(lstPathNifti, batchSize=batchSize, isDebug=isDebug)
outMsk = ret[0]
if isInpFromFile:
tmpNii = nib.load(pathInpNii)
else:
tmpNii = pathInpNii
#
outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.float16), tmpNii.affine, header=tmpNii.header)
# resize if need:
if outSize is not None:
outMskNii = resizeNii(outMskNii, newSize=outSize)
# threshold if need:
if threshold is not None:
outMskNii = nib.Nifti1Image( (outMskNii.get_data()>threshold).astype(np.float16), outMskNii.affine, header=outMskNii.header)
# save if output path is present
if pathOutNii is not None:
nib.save(outMskNii, pathOutNii)
# pathOutNii = '%s-segm.nii.gz' % pathInpNii
else:
return outMskNii
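# Example usage of segmentLungs25D (hypothetical paths, shown only as a
# sketch based on the signature above; not part of the service pipeline):
#   lungsNii = segmentLungs25D('/data/case1/ct.nii.gz',
#                              dirWithModel='/models/ct_lung_segm_2d',
#                              outSize=(512, 512, 256), threshold=0.5)
#   nib.save(lungsNii, '/data/case1/ct-lungs.nii.gz')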
#########################################
def segmentLesions3D(pathInpNii, dirWithModel, pathOutNii=None, outSize=None, isDebug=False, threshold=None):
if isinstance(pathInpNii, str):# or isinstance(pathInpNii, unicode):
isInpFromFile = True
if not os.path.isfile(pathInpNii):
raise Exception('Cant find input file [%s]' % pathInpNii)
else:
isInpFromFile = False
if not os.path.isdir(dirWithModel):
raise Exception('Cant find directory with model [%s]' % dirWithModel)
if pathOutNii is not None:
outDir = os.path.dirname(os.path.abspath(pathOutNii))
if not os.path.isdir(outDir):
raise Exception(
'Cant find output directory [%s], create directory for output file before this call' % outDir)
batcherInfer = BatcherCTLesion3D()
batcherInfer.loadModelForInference(pathModelJson=dirWithModel, pathMeanData=dirWithModel)
if isDebug:
batcherInfer.model.summary()
ret = batcherInfer.inference([pathInpNii], batchSize=1)
if batcherInfer.isTheanoShape:
outMsk = ret[0][1, :, :, :]
else:
outMsk = ret[0][:, :, :, 1]
if isInpFromFile:
tmpNii = nib.load(pathInpNii)
else:
tmpNii = pathInpNii
#
outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.float16), tmpNii.affine, header=tmpNii.header)
if outSize is not None:
outMskNii = resizeNii(outMskNii, newSize=outSize)
if threshold is not None:
outMskNii = nib.Nifti1Image((outMskNii.get_data() > threshold).astype(np.float16),
outMskNii.affine,
header=outMskNii.header)
if pathOutNii is not None:
nib.save(outMskNii, pathOutNii)
# pathOutNii = '%s-segm.nii.gz' % pathInpNii
else:
return outMskNii
#########################################
def segmentLesions3Dv2(pathInpNii, dirWithModel, pathOutNii=None, outSize=None, isDebug=False, threshold=None, path_lungs=None):
if isinstance(pathInpNii, str):# or isinstance(pathInpNii, unicode):
isInpFromFile = True
if not os.path.isfile(pathInpNii):
raise Exception('Cant find input file [%s]' % pathInpNii)
else:
isInpFromFile = False
if not os.path.isdir(dirWithModel):
raise Exception('Cant find directory with model [%s]' % dirWithModel)
if pathOutNii is not None:
outDir = os.path.dirname(os.path.abspath(pathOutNii))
if not os.path.isdir(outDir):
raise Exception(
'Cant find output directory [%s], create directory for output file before this call' % outDir)
batcherInfer = InferencerLesion3Dv2()
batcherInfer.load_model(path_model=dirWithModel)
if isDebug:
batcherInfer.model.summary()
ret = batcherInfer.inference([pathInpNii], batchSize=1)
outMsk = ret[0]
if isInpFromFile:
tmpNii = nib.load(pathInpNii)
else:
tmpNii = pathInpNii
#
outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.uint8), tmpNii.affine, header=tmpNii.header)
if outSize is not None:
outMskNii = resizeNii(outMskNii, newSize=outSize, parOrder = 0)
if path_lungs is not None:
tmp_affine = outMskNii.affine
tmp_header = outMskNii.header
msk_lungs = resizeNii(path_lungs, newSize=outSize, parOrder=0).get_data()
outMsk = outMskNii.get_data().astype(np.uint8)
outMsk[msk_lungs < 0.5] = 0
outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.uint8), tmp_affine, header=tmp_header)
# if threshold is not None:
# outMskNii = nib.Nifti1Image((outMskNii.get_data() > threshold).astype(np.float16),
# outMskNii.affine,
# header=outMskNii.header)
if pathOutNii is not None:
nib.save(outMskNii, pathOutNii)
# pathOutNii = '%s-segm.nii.gz' % pathInpNii
else:
return outMskNii
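# Example usage of segmentLesions3Dv2 (hypothetical paths, a sketch based on
# the signature above; api_segmentLungAndLesion below uses the v3 variant):
#   lesionNii = segmentLesions3Dv2('/data/case1/ct.nii.gz',
#                                  dirWithModel='/models/ct_lesion_segm_3d',
#                                  outSize=(512, 512, 256),
#                                  path_lungs='/data/case1/ct-lungs.nii.gz')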
def segmentLesions3Dv3(nii_img, dir_with_model, nii_lings=None, path_out_nii=None, out_size=None, is_debug=False, threshold=None):
# if isinstance(nii_img, str):# or isinstance(pathInpNii, unicode):
# isInpFromFile = True
# if not os.path.isfile(nii_img):
# raise Exception('Cant find input file [%s]' % nii_img)
# else:
# isInpFromFile = False
if not os.path.isdir(dir_with_model):
raise Exception('Cant find directory with model [%s]' % dir_with_model)
if path_out_nii is not None:
outDir = os.path.dirname(os.path.abspath(path_out_nii))
if not os.path.isdir(outDir):
raise Exception(
'Cant find output directory [%s], create directory for output file before this call' % outDir)
data_shape = [512, 512, 256]
args = fcn3dv3.get_args_obj()
path_model = glob.glob('{}/*.h5'.format(dir_with_model))
if len(path_model) < 1:
        raise FileNotFoundError('Cant find any keras model in directory [{}] for lesion detection'.format(dir_with_model))
args.model = path_model[0]
# args.img = nii_img
# args.lung = nii_lings
# args.infer_out = path_msk_out
cfg = fcn3dv3.Config(args)
model = fcn3dv3.build_model(cfg, inp_shape=list(cfg.infer_crop_pad) + [1])
model.summary()
model.load_weights(path_model[0], by_name=True)
msk_cls, msk_val = fcn3dv3.run_inference_crdf(cfg, model, nii_img, nii_lings)
return msk_cls, msk_val
# batcherInfer = InferencerLesion3Dv2()
# batcherInfer.load_model(path_model=dir_with_model)
# if is_debug:
# batcherInfer.model.summary()
# ret = batcherInfer.inference([nii_img], batchSize=1)
# outMsk = ret[0]
# if isInpFromFile:
# tmpNii = nib.load(nii_img)
# else:
# tmpNii = nii_img
# #
# outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.uint8), tmpNii.affine, header=tmpNii.header)
# if out_size is not None:
# outMskNii = resizeNii(outMskNii, newSize=out_size, parOrder = 0)
# if path_lungs is not None:
# tmp_affine = outMskNii.affine
# tmp_header = outMskNii.header
# msk_lungs = resizeNii(path_lungs, newSize=out_size, parOrder=0).get_data()
# outMsk = outMskNii.get_data().astype(np.uint8)
# outMsk[msk_lungs < 0.5] = 0
# outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.uint8), tmp_affine, header=tmp_header)
# # if threshold is not None:
# # outMskNii = nib.Nifti1Image((outMskNii.get_data() > threshold).astype(np.float16),
# # outMskNii.affine,
# # header=outMskNii.header)
# if path_out_nii is not None:
# nib.save(outMskNii, path_out_nii)
# # pathOutNii = '%s-segm.nii.gz' % pathInpNii
# else:
# return outMskNii
#########################################
def api_segmentLungAndLesion(dirModelLung, dirModelLesion, series,
ptrLogger=None,
shape4Lung = (256, 256, 64), shape4Lesi = (512, 512, 256), gpuMemUsage=0.4):
# (1) msg-helpers
def msgInfo(msg):
if ptrLogger is not None:
ptrLogger.info(msg)
else:
print (msg)
def msgErr(msg):
if ptrLogger is not None:
ptrLogger.error(msg)
else:
print (msg)
# (2.1) check data
if not series.isInitialized():
msgErr('Series is not initialized, skip .. [{0}]'.format(series))
return False
# if not series.isDownloaded():
# msgErr('Series data is not downloaded, skip .. [{0}]'.format(series))
# return False
if not series.isConverted():
msgErr('Series DICOM data is not converted to Nifti format, skip .. [{0}]'.format(series))
return False
# (2.2) check existing files
pathNii = series.pathConvertedNifti(isRelative=False)
# pathSegmLungs = series.pathPostprocLungs(isRelative=False)
pathSegmLungs = series.pathPostprocLungs(isRelative=False)
pathSegmLesions = series.pathPostprocLesions2(isRelative=False)
if os.path.isfile(pathSegmLungs) and os.path.isfile(pathSegmLesions):
msgInfo('Series data is already segmented, skip task ... [{0}]'.format(series))
return False
else:
# (2.3.0) TF GPU memory usage constraints
        # FIXME: temporary fix; in future, add a memory usage parameter to the application config
import tensorflow as tf
import keras.backend as K
from keras.backend.tensorflow_backend import set_session
if K.image_dim_ordering() == 'tf':
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = gpuMemUsage
set_session(tf.Session(config=config))
# (2.3.1) load and resize
try:
dataNii = nib.load(pathNii)
shapeOrig = dataNii.shape
niiResiz4Lung = resizeNii(dataNii, shape4Lung)
except Exception as err:
msgErr('Cant load and resize input nifti file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
return False
# (2.3.2) segment lungs
try:
if not os.path.isfile(pathSegmLungs):
K.clear_session()
lungMask = segmentLungs25D(niiResiz4Lung,
dirWithModel=dirModelLung,
pathOutNii=None,
outSize=shapeOrig,
# outSize=shape4Lung,
threshold=0.5)
nib.save(lungMask, pathSegmLungs)
else:
pass
# lungMask = nib.load(pathSegmLungs)
except Exception as err:
msgErr('Cant segment lungs for file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
return False
# (2.3.3) segment lesions
try:
if not os.path.isfile(pathSegmLesions):
# lesionMask = segmentLesions3D(niiResiz4Lesi,
# dirWithModel=dirModelLesion,
# pathOutNii=None,
# outSize=shapeOrig,
# # outSize=shape4Lung,
# threshold=None)
K.clear_session()
shape_lesions = [512, 512, 256]
nii_lung_resiz4lesion = resizeNii(pathSegmLungs, shape_lesions, parOrder=0)
nii_data_resiz4lesion = resizeNii(dataNii, shape_lesions, parOrder=1)
lesionMask, lesionMaskVal = segmentLesions3Dv3(nii_data_resiz4lesion,
dir_with_model=dirModelLesion,
nii_lings=nii_lung_resiz4lesion,
path_out_nii=None,
# outSize=shapeOrig,
# outSize=shape4Lung,
threshold=None)
# lesionMask = segmentLesions3Dv2(niiResiz4Lesi,
# dirWithModel=dirModelLesion,
# pathOutNii=None,
# outSize=shapeOrig,
# # outSize=shape4Lung,
# threshold=None,
# path_lungs=pathSegmLungs)
# (2.3.4) save results
try:
lesionMask = resizeNii(lesionMask, shapeOrig, parOrder=0)
nib.save(lesionMask, pathSegmLesions)
except Exception as err:
msgErr('Cant save segmentation results to file [{0}] : {1}, for series [{2}]'.format(pathSegmLesions, err, series))
return False
except Exception as err:
msgErr('Cant segment lesions for file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
return False
return True
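# Illustrative usage sketch (added comment; the model directories and the
# `series` object below are hypothetical, not part of the original module):
#   ok = api_segmentLungAndLesion(dirModelLung='/models/lung',
#                                 dirModelLesion='/models/lesion',
#                                 series=series, ptrLogger=None)
# The call returns True only after the lesion mask has been written in this
# call; it returns False when the series lacks converted data, was already
# processed, or any segmentation step fails.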
def api_generateAllReports(series,
dirModelLung, dirModelLesion,
ptrLogger=None,
shape4Lung = (256, 256, 64), shape4Lesi = (256, 256, 64)):
# (1) msg-helpers
def msgInfo(msg):
if ptrLogger is not None:
ptrLogger.info(msg)
else:
print (msg)
def msgErr(msg):
if ptrLogger is not None:
ptrLogger.error(msg)
else:
print (msg)
# (0) prepare path-variables
pathNii = series.pathConvertedNifti(isRelative=False)
pathSegmLungs = series.pathPostprocLungs(isRelative=False)
pathSegmLungsDiv2 = series.pathPostprocLungsDiv2(isRelative=False)
# pathSegmLesions1 = series.pathPostprocLesions(isRelative=False)
pathSegmLesions1 = series.pathPostprocLesions2(isRelative=False)
pathPreview2 = series.pathPostprocPreview(isRelative=False, previewId=2)
pathPreview3 = series.pathPostprocPreview(isRelative=False, previewId=3)
pathPreview4 = series.pathPostprocPreview(isRelative=False, previewId=4)
pathReport = series.pathPostprocReport(isRelative=False)
# (1) Lung/Lesions segmentation
retSegm = api_segmentLungAndLesion(dirModelLung=dirModelLung,
dirModelLesion=dirModelLesion,
series=series,
ptrLogger=ptrLogger,
shape4Lung=shape4Lung,
shape4Lesi=shape4Lesi)
msgInfo('Segmentation Lung/Lesion isOk = {0}'.format(retSegm))
if (not os.path.isfile(pathSegmLungs)) or (not os.path.isfile(pathSegmLesions1)):
msgErr('Cant segment Lung/Lesion, skip... [{0}]'.format(series))
return False
# (1.1) loading lungs-masks/lesions-masks
try:
niiLung = nib.load(pathSegmLungs)
niiLesion = nib.load(pathSegmLesions1)
except Exception as err:
msgErr('Cant load Lung/Lesion Nifti data: [{0}], for {1}'.format(err, pathSegmLesions1))
return False
# (2) prepare divided lungs
if not os.path.isfile(pathSegmLungsDiv2):
niiLungDiv, _ = preproc.makeLungedMaskNii(niiLung)
nib.save(niiLungDiv, pathSegmLungsDiv2)
else:
niiLungDiv = nib.load(pathSegmLungsDiv2)
# (3) calc lesion score
# try:
# retLesionScoreBin, retLesionScoreById, retLesionScoreByName = preproc.prepareLesionDistribInfoV2(niiLung, niiLesion, niiLungDIV2=niiLungDiv)
# except Exception as err:
# msgErr('Cant evaluate Lesion-score: [{0}], for {1}'.format(err, pathSegmLesions1))
# return False
# (3.1) calc cbir-descriptor
texture_asymmetry = None
try:
cbir_desc = ldsc.calc_desc(pathSegmLungsDiv2, pathSegmLesions1)
cbir_desc_json = ldsc.desc_to_json(cbir_desc)
texture_asymmetry = ldsc.desc_asymmetry(desc_=cbir_desc)
except Exception as err:
msgErr('Cant evaluate Lesion-score: [{0}], for {1}'.format(err, pathSegmLesions1))
return False
# (4) prepare short report about lungs
try:
retLungInfo = preproc.prepareLungSizeInfoNii(niiLungDiv)
if texture_asymmetry is not None:
# retLungInfo['asymmetry'][1]["value"] = '%0.3f' % texture_asymmetry
retLungInfo['asymmetry'][1]["value"] = float(texture_asymmetry)
except Exception as err:
msgErr('Cant get Lung information : [{0}], for {1}'.format(err, series))
return False
# (5) generate preview & save preview image
# try:
dataImg = preproc.normalizeCTImage(nib.load(pathNii).get_data())
# dataMsk = np.round(niiLung.get_data()).astype(np.uint8)
dataMsk = np.round(niiLungDiv.get_data()).astype(np.uint8)
dataLes = np.round(niiLesion.get_data()).astype(np.uint8)
imgPreviewJson2 = preproc.genPreview2D(dataImg, dataMsk, dataLes, pathPreview2, 2)
imgPreviewJson3 = preproc.genPreview2D(dataImg, dataMsk, dataLes, pathPreview3, 3)
# imgPreviewJson4 = preproc.genPreview2D(dataImg, dataMsk, dataLes, pathPreview4, 4)
# (6) generate & save JSON report
try:
jsonReport = preproc.getJsonReport(series=series,
reportLesionScore=None, #retLesionScoreBin,
reportLesion=cbir_desc_json,
reportLungs=retLungInfo,
lstImgJson=[imgPreviewJson3, imgPreviewJson2],
reportLesionScoreById = None, #retLesionScoreById,
reportLesionScoreByName = None) #retLesionScoreByName)
with open(pathReport, 'w') as f:
f.write(json.dumps(jsonReport, indent=4))
except Exception as err:
msgErr('Cant generate final JSON report : [{0}], for {1}'.format(err, series))
return False
# (7) generate and save 3 directories with DICOM files, converted from Lesions NifTi
# original, lesions_only and lesions_map
# file names convention: {S3 bucket name}/viewer/{map_type}/{patientID}/{studyUID}/{seriesUID}/{instanceUID}.{extension}
# preproc.prepareCTpreview(series)
# FIXME: append PDF generation in future here
# (6) generate PDF preview
return True
def api_generateCBIR_BuildDSC(db_watcher, num_threads):
print('-')
#########################################
if __name__ == '__main__':
print ('---') | apache-2.0 |
ephes/scikit-learn | sklearn/tree/export.py | 75 | 15670 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
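# Illustrative sketch (added comment, not part of the original module): each
# entry of the returned list is an [R, G, B] triple of ints in 0..255, one per
# class, e.g.
#     palette = _color_brew(3)    # three evenly spaced hues
#     assert len(palette) == 3 and all(len(rgb) == 3 for rgb in palette)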
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            # plain closing bracket; the string has no format placeholder
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
SMAPPNYU/smappPy | smappPy/topics/visualization.py | 2 | 3679 | """
Topic visualization functions
Relies on GENSIM objects
@auth dpb
@date 3/25/2014
"""
import matplotlib.pyplot as plt
from collections import defaultdict
def topic_sum_barchart(corpus, model, show=False, outfile=None, bar_width=0.5, trim_threshold=0.0, normed=True):
"""
Sums the probabilities of each topic across all documents in a corpus. Creates barcharts of sums.
show=True runs matplotlib show.
Specify an outfile value to save to disk as pdf
    trim_threshold suppresses topics whose summed probability across all documents is
    below the given value (default 0.0, i.e. display all topics)
normed=True indicates that the sum values should be normalized (averaged) over the number of docs in
the corpus
"""
topic_p_dict = defaultdict(float)
for doc in corpus:
for doc_topic in model[doc]:
topic_p_dict[doc_topic[0]] += doc_topic[1]
topic_ids = []
topic_counts = []
for (tid, sum_p) in topic_p_dict.items():
if sum_p >= trim_threshold:
topic_ids.append(tid)
topic_counts.append(sum_p)
if normed:
num_docs = len(corpus)
plt.bar(range(len(topic_ids)), [sp / num_docs for sp in topic_counts], width=bar_width, linewidth=0, color="blue", alpha=0.75)
plt.ylabel("Normalized P over all documents (p >= {0})".format(trim_threshold))
plt.title("Normalized Topic Probability")
else:
plt.bar(range(len(topic_ids)), topic_counts, width=bar_width, linewidth=0, color="blue", alpha=0.75)
plt.ylabel("Sum P over all documents (if >= {0})".format(trim_threshold))
plt.title("Sum Topic Probability")
plt.xlabel("Topics")
plt.tick_params(axis="x", which="both", bottom="off", top="off", labelbottom="on")
plt.xticks(range(len(topic_ids)), topic_ids)
plt.tight_layout()
if outfile != None:
print "(Saving figure to file '{0}'')".format(outfile)
plt.savefig(outfile, format="pdf")
if show:
print "(Showing plot via matplotlib)"
plt.show()
plt.clf()
plt.close()
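# Illustrative usage sketch (added comment; `corpus` and `lda_model` are
# hypothetical gensim objects, not defined in this module):
#     topic_sum_barchart(corpus, lda_model, show=True, trim_threshold=0.5)
# plots, for every topic whose summed probability reaches the threshold, the
# probability mass accumulated over all documents (normalized by default).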
def topic_occurrence_barchart(corpus, model, topic_threshold=0.2, show=False, outfile=None, bar_width=0.5, trim=True):
"""
    Creates barchart of occurrences of each topic in each document with P >= topic_threshold.
show=True runs matplotlib show.
Specify an outfile value to save to disk as pdf
If trim=True, remove all 0-count values and then display (maintain indexes)
"""
topic_count_dict = defaultdict(int)
for doc in corpus:
for doc_topic in model[doc]:
if doc_topic[1] >= topic_threshold:
topic_count_dict[doc_topic[0]] += 1
topic_ids = []
topic_counts = []
if trim:
for (tid, count) in topic_count_dict.items():
if count > 0:
topic_ids.append(tid)
topic_counts.append(count)
else:
for (tid, count) in topic_count_dict.items():
topic_ids.append(tid)
topic_counts.append(count)
plt.bar(range(len(topic_ids)), topic_counts, width=bar_width, linewidth=0, color="red", alpha=0.75)
plt.xlabel("Topics")
plt.ylabel("Occurences P >= {0}".format(topic_threshold))
plt.title("Topic Occurrence")
plt.tick_params(axis="x", which="both", bottom="off", top="off", labelbottom="on")
plt.xticks(range(len(topic_ids)), topic_ids)
plt.tight_layout()
if outfile != None:
print "(Saving figure to file '{0}'')".format(outfile)
plt.savefig(outfile, format="pdf")
if show:
print "(Showing plot via matplotlib)"
plt.show()
plt.clf()
plt.close()
| gpl-2.0 |
ebilionis/py-best | best/smc/_plot.py | 1 | 2740 | """
.. _plot:
++++++++
Plotting
++++++++
"""
__all__ = ['hist', 'make_movie_from_db']
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from . import multinomial_resample
from . import kde
def hist(particle_approximation, var_name, normed=True):
"""
    Plot the histogram of a variable of a particle approximation.
:param particle_approximation: A particle approximation.
:type particle_approximation: :class:`pysmc.ParticleApproximation`
:param var_name: The name of the variable you want to plot.
:type var_name: str
    (The number of histogram bins is chosen automatically as one tenth of the
    number of particles.)
:param normed: ``True`` if you want the histogram to be normalized,
``False`` otherwise.
:type normed: bool
"""
x = getattr(particle_approximation, var_name)
w = particle_approximation.weights
if particle_approximation.use_mpi:
comm = particle_approximation.comm
x = np.hstack(comm.allgather(x))
w = np.hstack(comm.allgather(w))
    bins = w.shape[0] // 10  # integer bin count: one bin per ten particles
plt.xlabel(var_name, fontsize=16)
plt.ylabel('p(%s)' % var_name, fontsize=16)
return plt.hist(x, weights=w, bins=bins, normed=normed)
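# Illustrative usage sketch (added comment; `pa` is a hypothetical
# pysmc.ParticleApproximation holding a variable named 'theta'):
#     hist(pa, 'theta')
#     plt.show()
# draws the weighted (and, by default, normalized) histogram of that variable,
# gathering particles across MPI ranks when the approximation uses MPI.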
def make_movie_from_db(db, var_name):
"""
Make a movie from a database.
"""
k = kde(db.particle_approximations[0], var_name)
x01 = k.dataset.min()
x02 = k.dataset.max()
x0 = np.linspace(x01, x02, 100)
y0 = k(x0)
k = kde(db.particle_approximations[-1], var_name)
yl = k(x0)
yl1 = yl.min()
yl2 = yl.max()
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False,
xlim=(x01, x02), ylim=(yl1, yl2))
line, = ax.plot([], [], linewidth=2)
particles, = ax.plot([], [], 'ro', markersize=5)
gamma_text = ax.text(0.02, 0.95, '', transform=ax.transAxes,
fontsize=16)
ax.set_xlabel(var_name, fontsize=16)
ax.set_ylabel('p(%s)' % var_name, fontsize=16)
def init():
line.set_data([], [])
particles.set_data([], [])
gamma_text.set_text('')
return line, particles, gamma_text
def animate(i):
k = kde(db.particle_approximations[i], var_name)
line.set_data(x0, k(x0))
p = getattr(db.particle_approximations[i], var_name)
particles.set_data(p, np.zeros(p.shape) + yl1 + 0.01 * (yl2 - yl1))
gamma_text.set_text('%s = %1.4f' % (db.gamma_name, db.gammas[i]))
return line, particles, gamma_text
ani = animation.FuncAnimation(fig, animate, frames=db.num_gammas,
interval=200, blit=True, init_func=init)
return ani
| lgpl-3.0 |
julietbravo/microhh | cases/rico/rico_stats.py | 5 | 1771 | import xarray as xr
import matplotlib.pyplot as pl
pl.close('all')
f = xr.open_dataset('rico.default.0000000.nc')
# Colors in plot
c_sedi = 'C0' # Sedimentation
c_auto = 'C1' # Autoconversion
c_evap = 'C2' # Evaporation
c_scbr = 'C3' # Selfcollection and breakup
c_accr = 'C4' # Accretion
# Time (index) to plot
time = 15
pl.figure()
pl.subplot(221)
pl.xlabel('dthl/dt (K h-1)')
pl.plot(f['auto_thlt'][time,:]*3600., f['z'], label='Autoconversion', color=c_auto)
pl.plot(f['evap_thlt'][time,:]*3600., f['z'], label='Evaporation', color=c_evap)
pl.plot(f['accr_thlt'][time,:]*3600., f['z'], label='Accretion', color=c_accr)
pl.legend()
pl.subplot(222)
pl.xlabel('dqt/dt (g kg-1 h-1)')
pl.plot(f['auto_qtt'][time,:]*3600000., f['z'], label='Autoconversion', color=c_auto)
pl.plot(f['evap_qtt'][time,:]*3600000., f['z'], label='Evaporation', color=c_evap)
pl.plot(f['accr_qtt'][time,:]*3600000., f['z'], label='Accretion', color=c_accr)
pl.legend()
pl.subplot(223)
pl.xlabel('dqr/dt (g kg-1 h-1)')
pl.plot(f['auto_qrt'][time,:]*3600000., f['z'], label='Autoconversion', color=c_auto)
pl.plot(f['evap_qrt'][time,:]*3600000., f['z'], label='Evaporation', color=c_evap)
pl.plot(f['accr_qrt'][time,:]*3600000., f['z'], label='Accretion', color=c_accr)
pl.plot(f['sed_qrt' ][time,:]*3600000., f['z'], label='Sedimentation', color=c_sedi)
pl.legend()
pl.subplot(224)
pl.xlabel('dnr/dt (m-3 h-1)')
pl.plot(f['auto_nrt'][time,:]*3600000., f['z'], label='Autoconversion', color=c_auto)
pl.plot(f['evap_nrt'][time,:]*3600000., f['z'], label='Evaporation', color=c_evap)
pl.plot(f['scbr_nrt'][time,:]*3600000., f['z'], label='Selfcollection/breakup', color=c_scbr)
pl.plot(f['sed_nrt' ][time,:]*3600000., f['z'], label='Sedimentation', color=c_sedi)
pl.legend(loc=1)
| gpl-3.0 |
B3AU/waveTree | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
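# Illustrative comment (added): bench_sample times a single call of a sampler
# with the signature sample(n_population, n_samples), e.g.
#     t = bench_sample(lambda n, k: random.sample(xrange(n), k), 10000, 100)
# and returns the elapsed wall-clock time in seconds as a float.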
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
MonoCloud/zipline | tests/finance/test_slippage.py | 32 | 18400 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for finance.slippage
"""
import datetime
import pytz
from unittest import TestCase
from nose_parameterized import parameterized
import pandas as pd
from zipline.finance.slippage import VolumeShareSlippage
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.finance.blotter import Order
class SlippageTestCase(TestCase):
def test_volume_share_slippage(self):
event = Event(
{'volume': 200,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0}
)
slippage_model = VolumeShareSlippage()
open_orders = [
Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
sid=133)
]
orders_txns = list(slippage_model.simulate(
event,
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.01875),
'dt': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': int(50),
'sid': int(133),
'commission': None,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
        # TODO: Make expected_txn a Transaction object and ensure there
# is a __eq__ for that class.
self.assertEquals(expected_txn, txn.__dict__)
def test_orders_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
txn = orders_txns[0][1]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133),
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
expected_txn = {}
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
STOP_ORDER_CASES = {
# Stop orders can be long/short and have their price greater or
# less than the stop.
#
# A stop being reached is conditional on the order direction.
# Long orders reach the stop when the price is greater than the stop.
# Short orders reach the stop when the price is less than the stop.
#
# Which leads to the following 4 cases:
#
# | long | short |
# | price > stop | | |
# | price < stop | | |
#
# Currently the slippage module acts according to the following table,
# where 'X' represents triggering a transaction
# | long | short |
# | price > stop | | X |
# | price < stop | X | |
#
# However, the following behavior *should* be followed.
#
# | long | short |
# | price > stop | X | |
# | price < stop | | X |
'long | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 4.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 4.0,
'open': 3.5
},
'expected': {
'transaction': {
'price': 4.001,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': 100,
'sid': 133,
}
}
},
'long | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.6
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 4.0
},
'expected': {
'transaction': None
}
},
'short | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.4
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 3.0
},
'expected': {
'transaction': None
}
},
'short | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.0,
'open': 3.0
},
'expected': {
'transaction': {
'price': 2.99925,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': -100,
'sid': 133,
}
}
},
}
@parameterized.expand([
(name, case['order'], case['event'], case['expected'])
for name, case in STOP_ORDER_CASES.items()
])
def test_orders_stop(self, name, order_data, event_data, expected):
order = Order(**order_data)
event = Event(initial_values=event_data)
slippage_model = VolumeShareSlippage()
try:
_, txn = next(slippage_model.simulate(event, [order]))
except StopIteration:
txn = None
if expected['transaction'] is None:
self.assertIsNone(txn)
else:
self.assertIsNotNone(txn)
for key, value in expected['transaction'].items():
self.assertEquals(value, txn[key])
def test_orders_stop_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.0})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 4.0})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
def gen_trades(self):
# create a sequence of trades
events = [
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 4.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 4.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'open': 3.5
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'open': 4.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'open': 3.5
})
]
return events
| apache-2.0 |
XianliangJ/collections | DCTCPTest/helper.py | 8 | 3406 | '''
Helper module for the plot scripts.
'''
import re
import itertools
import matplotlib as m
import os
if os.uname()[0] == "Darwin":
m.use("MacOSX")
else:
m.use("Agg")
import matplotlib.pyplot as plt
import argparse
import math
#import termcolor as T
def read_list(fname, delim=','):
lines = open(fname).xreadlines()
ret = []
for l in lines:
ls = l.strip().split(delim)
ls = map(lambda e: '0' if e.strip() == '' or e.strip() == 'ms' or e.strip() == 's' else e, ls)
ret.append(ls)
return ret
def ewma(alpha, values):
if alpha == 0:
return values
ret = []
prev = 0
for v in values:
prev = alpha * prev + (1 - alpha) * v
ret.append(prev)
return ret
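# Worked example (added comment): with alpha=0.5 each step keeps half of the
# running value and adds half of the new sample, so
#     ewma(0.5, [1.0, 1.0, 1.0]) -> [0.5, 0.75, 0.875]
# while ewma(0, values) returns the input unchanged.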
def col(n, obj = None, clean = lambda e: e):
"""A versatile column extractor.
col(n, [1,2,3]) => returns the nth value in the list
col(n, [ [...], [...], ... ] => returns the nth column in this matrix
col('blah', { ... }) => returns the blah-th value in the dict
col(n) => partial function, useful in maps
"""
if obj == None:
def f(item):
return clean(item[n])
return f
if type(obj) == type([]):
if len(obj) > 0 and (type(obj[0]) == type([]) or type(obj[0]) == type({})):
return map(col(n, clean=clean), obj)
if type(obj) == type([]) or type(obj) == type({}):
try:
return clean(obj[n])
except:
#print T.colored('col(...): column "%s" not found!' % (n), 'red')
return None
# We wouldn't know what to do here, so just return None
#print T.colored('col(...): column "%s" not found!' % (n), 'red')
return None
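# Illustrative comment (added): the partial form combines nicely with map and
# an optional cleaning function, e.g.
#     rows = [['a', ' 1 '], ['b', '2']]
#     map(col(1, clean=lambda s: int(s.strip())), rows) -> [1, 2]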
def transpose(l):
return zip(*l)
def avg(lst):
return sum(map(float, lst)) / len(lst)
def stdev(lst):
mean = avg(lst)
var = avg(map(lambda e: (e - mean)**2, lst))
return math.sqrt(var)
def xaxis(values, limit):
l = len(values)
return zip(*map(lambda (x,y): (x*1.0*limit/l, y), enumerate(values)))
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def cdf(values):
values.sort()
prob = 0
l = len(values)
x, y = [], []
for v in values:
prob += 1.0 / l
x.append(v)
y.append(prob)
return (x, y)
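# Worked example (added comment): cdf sorts its argument in place and returns
# the empirical CDF as two lists, e.g.
#     cdf([3.0, 1.0, 2.0]) -> ([1.0, 2.0, 3.0], [0.333..., 0.667..., 1.0])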
def parse_cpu_usage(fname, nprocessors=8):
"""Returns (user,system,nice,iowait,hirq,sirq,steal) tuples
aggregated over all processors. DOES NOT RETURN IDLE times."""
data = grouper(nprocessors, open(fname).readlines())
"""Typical line looks like:
Cpu0 : 0.0%us, 1.0%sy, 0.0%ni, 97.0%id, 0.0%wa, 0.0%hi, 2.0%si, 0.0%st
"""
ret = []
for collection in data:
total = [0]*8
for cpu in collection:
usages = cpu.split(':')[1]
usages = map(lambda e: e.split('%')[0],
usages.split(','))
for i in xrange(len(usages)):
total[i] += float(usages[i])
total = map(lambda t: t/nprocessors, total)
# Skip idle time
ret.append(total[0:3] + total[4:])
return ret
def pc95(lst):
l = len(lst)
return sorted(lst)[ int(0.95 * l) ]
def pc99(lst):
l = len(lst)
return sorted(lst)[ int(0.99 * l) ]
def coeff_variation(lst):
return stdev(lst) / avg(lst)
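# Illustrative comment (added): pc95/pc99 pick the 95th/99th percentile from a
# sorted copy, e.g. pc95(range(100)) -> 95, while coeff_variation reports the
# relative spread stdev(lst) / avg(lst).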
| gpl-3.0 |
Achuth17/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 67 | 14842 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = assert_warns(DeprecationWarning, make_multilabel_classification,
n_samples=100, n_features=20, n_classes=3,
random_state=0, allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator=True,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
return_indicator=True, allow_unlabeled=allow_unlabeled,
return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
hugobowne/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
robbymeals/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
pySTEPS/pysteps | examples/data_transformations.py | 1 | 7504 | # -*- coding: utf-8 -*-
"""
Data transformations
====================
The statistics of intermittent precipitation rates are particularly non-Gaussian
and display an asymmetric distribution bounded at zero.
Such properties restrict the usage of well-established statistical methods that
assume symmetric or Gaussian data.
A common workaround is to introduce a suitable data transformation to approximate
a normal distribution.
In this example, we test the data transformation methods available in pysteps
in order to obtain a more symmetric distribution of the precipitation data
(excluding the zeros).
The currently available transformations include the Box-Cox, dB, square-root and
normal quantile transforms.
"""
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from pysteps import io, rcparams
from pysteps.utils import conversion, transformation
from scipy.stats import skew
###############################################################################
# Read the radar input images
# ---------------------------
#
# First, we will import the sequence of radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201609281600", "%Y%m%d%H%M")
data_source = rcparams.data_sources["fmi"]
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Get 1 hour of observations in the data archive
fns = io.archive.find_by_date(
date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_next_files=11
)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
Z, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
# Keep only positive rainfall values
Z = Z[Z > metadata["zerovalue"]].flatten()
# Convert to rain rate
R, metadata = conversion.to_rainrate(Z, metadata)
###############################################################################
# Test data transformations
# -------------------------
# Define method to visualize the data distribution with boxplots and plot the
# corresponding skewness
def plot_distribution(data, labels, skw):
N = len(data)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax2.plot(np.arange(N + 2), np.zeros(N + 2), ":r")
ax1.boxplot(data, labels=labels, sym="", medianprops={"color": "k"})
ymax = []
for i in range(N):
y = skw[i]
x = i + 1
ax2.plot(x, y, "*r", ms=10, markeredgecolor="k")
ymax.append(np.max(data[i]))
# ylims
ylims = np.percentile(ymax, 50)
ax1.set_ylim((-1 * ylims, ylims))
ylims = np.max(np.abs(skw))
ax2.set_ylim((-1.1 * ylims, 1.1 * ylims))
# labels
ax1.set_ylabel(r"Standardized values [$\sigma$]")
ax2.set_ylabel(r"Skewness []", color="r")
ax2.tick_params(axis="y", labelcolor="r")
###############################################################################
# Box-Cox transform
# ~~~~~~~~~~~~~~~~~
# The Box-Cox transform is a well-known power transformation introduced by
# `Box and Cox (1964)`_. In its one-parameter version, the Box-Cox transform
# takes the form T(x) = ln(x) for lambda = 0, or T(x) = (x**lambda - 1)/lambda
# otherwise.
#
# To find a suitable lambda, we will experiment with a range of values
# and select the one that produces the most symmetric distribution, i.e., the
# lambda associated with a value of skewness closest to zero.
# To visually compare the results, the transformed data are standardized.
#
# .. _`Box and Cox (1964)`: https://doi.org/10.1111/j.2517-6161.1964.tb00553.x
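# For illustration only (not part of the pysteps API): the one-parameter
# Box-Cox transform used below can be written as the small helper sketched
# here. The name ``boxcox_sketch`` is ours; the example itself keeps relying
# on ``transformation.boxcox_transform``.
def boxcox_sketch(x, lmbda):
    """Minimal sketch of the one-parameter Box-Cox transform."""
    x = np.asarray(x, dtype=float)
    if lmbda == 0:
        return np.log(x)  # T(x) = ln(x) when lambda is zero
    return (x ** lmbda - 1.0) / lmbda  # T(x) = (x**lambda - 1) / lambda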
data = []
labels = []
skw = []
# Test a range of values for the transformation parameter Lambda
Lambdas = np.linspace(-0.4, 0.4, 11)
for i, Lambda in enumerate(Lambdas):
R_, _ = transformation.boxcox_transform(R, metadata, Lambda)
R_ = (R_ - np.mean(R_)) / np.std(R_)
data.append(R_)
labels.append("{0:.2f}".format(Lambda))
skw.append(skew(R_)) # skewness
# Plot the transformed data distribution as a function of lambda
plot_distribution(data, labels, skw)
plt.title("Box-Cox transform")
plt.tight_layout()
plt.show()
# Best lambda
idx_best = np.argmin(np.abs(skw))
Lambda = Lambdas[idx_best]
print("Best parameter lambda: %.2f\n(skewness = %.2f)" % (Lambda, skw[idx_best]))
###############################################################################
# Compare data transformations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
data = []
labels = []
skw = []
###############################################################################
# Rain rates
# ~~~~~~~~~~
# First, let's have a look at the original rain rate values.
data.append((R - np.mean(R)) / np.std(R))
labels.append("R")
skw.append(skew(R))
###############################################################################
# dB transform
# ~~~~~~~~~~~~
# We transform the rainfall data into dB units: 10*log(R)
R_, _ = transformation.dB_transform(R, metadata)
data.append((R_ - np.mean(R_)) / np.std(R_))
labels.append("dB")
skw.append(skew(R_))
###############################################################################
# Square-root transform
# ~~~~~~~~~~~~~~~~~~~~~
# Transform the data using the square-root: sqrt(R)
R_, _ = transformation.sqrt_transform(R, metadata)
data.append((R_ - np.mean(R_)) / np.std(R_))
labels.append("sqrt")
skw.append(skew(R_))
###############################################################################
# Box-Cox transform
# ~~~~~~~~~~~~~~~~~
# We now apply the Box-Cox transform using the best parameter lambda found above.
R_, _ = transformation.boxcox_transform(R, metadata, Lambda)
data.append((R_ - np.mean(R_)) / np.std(R_))
labels.append("Box-Cox\n($\lambda=$%.2f)" % Lambda)
skw.append(skew(R_))
###############################################################################
# Normal quantile transform
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# At last, we apply the empirical normal quantile (NQ) transform as described in
# `Bogner et al (2012)`_.
#
# .. _`Bogner et al (2012)`: http://dx.doi.org/10.5194/hess-16-1085-2012
R_, _ = transformation.NQ_transform(R, metadata)
data.append((R_ - np.mean(R_)) / np.std(R_))
labels.append("NQ")
skw.append(skew(R_))
###############################################################################
# By plotting all the results, we can notice first of all the strongly asymmetric
# distribution of the original data (R) and that all transformations manage to
# reduce its skewness. Among these, the Box-Cox transform (using the best parameter
# lambda) and the normal quantile (NQ) transform provide the best correction.
# Despite not producing a perfectly symmetric distribution, the square-root (sqrt)
# transform has the strong advantage of being defined for zeros, too, while all
# other transformations need an arbitrary rule for non-positive values.
plot_distribution(data, labels, skw)
plt.title("Data transforms")
plt.tight_layout()
plt.show()
| bsd-3-clause |
alekz112/statsmodels | statsmodels/sandbox/tsa/examples/ex_mle_garch.py | 31 | 10676 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 01:01:50 2010
Author: josef-pktd
latest result
-------------
all are very close
garch0 has different parameterization of constant
ordering of parameters is different
seed 2780185
h.shape (2000,)
Optimization terminated successfully.
Current function value: 2093.813397
Iterations: 387
Function evaluations: 676
ggres.params [-0.6146253 0.1914537 0.01039355 0.78802188]
Optimization terminated successfully.
Current function value: 2093.972953
Iterations: 201
Function evaluations: 372
ggres0.params [-0.61537527 0.19635128 4.00706058]
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 2093.972953
Iterations: 51
Function evaluations: 551
Gradient evaluations: 110
ggres0.params [-0.61537855 0.19635265 4.00694669]
Optimization terminated successfully.
Current function value: 2093.751420
Iterations: 103
Function evaluations: 187
[ 0.78671519 0.19692222 0.61457171]
-2093.75141963
Final Estimate:
LLH: 2093.750 norm LLH: 2.093750
omega alpha1 beta1
0.7867438 0.1970437 0.6145467
long run variance comparison
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
R
>>> 0.7867438/(1- 0.1970437- 0.6145467)
4.1757097302897526
Garch (gjr) asymetric, longrun var ?
>>> 1/(1-0.6146253 - 0.1914537 - 0.01039355) * 0.78802188
4.2937548579245242
>>> 1/(1-0.6146253 - 0.1914537 + 0.01039355) * 0.78802188
3.8569053452140345
Garch0
>>> (1-0.61537855 - 0.19635265) * 4.00694669
0.7543830449902722
>>> errgjr4.var() #for different random seed
4.0924199964716106
todo: add code and verify, check for longer lagpolys
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import statsmodels.api as sm
from statsmodels.sandbox import tsa
from statsmodels.sandbox.tsa.garch import * # local import
nobs = 1000
examples = ['garch', 'rpyfit']
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print('seed', seed)
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print('seed', seed)
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.8,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation - DGP')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print('ggres0.params', ggres0.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2])
print(g11res)
llf = loglike_GARCH11(g11res, errgjr4-errgjr4.mean())
print(llf[0])
if 'rpyfit' in examples:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4-errgjr4.mean(), include_mean=False)
if 'rpysim' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print('R acf', tsa.acf(np.power(x,2))[:15])
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print(arma3res.params)
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print(arma3bres.params)
xr = r.garchSim( n = 100)
x = np.asarray(xr)
ggmod = Garch(x-x.mean())
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, x-x.mean())[0], [0.6, 0.6, 0.2])
print(g11res)
llf = loglike_GARCH11(g11res, x-x.mean())
print(llf[0])
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
fit = r.garchFit(f, data = x-x.mean(), include_mean=False, trace=False)
print(r.summary(fit))
'''based on R default simulation
model = list(omega = 1e-06, alpha = 0.1, beta = 0.8)
nobs = 1000
(with nobs=500, gjrgarch doesn't do well
>>> ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
Optimization terminated successfully.
Current function value: -448.861335
Iterations: 385
Function evaluations: 690
>>> print('ggres.params', ggres.params
ggres.params [ -7.75090330e-01 1.57714749e-01 -9.60223930e-02 8.76021411e-07]
rearranged
8.76021411e-07 1.57714749e-01(-9.60223930e-02) 7.75090330e-01
>>> print(g11res
[ 2.97459808e-06 7.83128600e-01 2.41110860e-01]
>>> llf = loglike_GARCH11(g11res, x-x.mean())
>>> print(llf[0]
442.603541936
Log Likelihood:
-448.9376 normalized: -4.489376
omega alpha1 beta1
1.01632e-06 1.02802e-01 7.57537e-01
'''
''' the following is for errgjr4-errgjr4.mean()
ggres.params [-0.54510407 0.22723132 0.06482633 0.82325803]
Final Estimate:
LLH: 2065.56 norm LLH: 2.06556
mu omega alpha1 beta1
0.07229732 0.83069480 0.26313883 0.53986167
ggres.params [-0.50779163 0.2236606 0.00700036 1.154832
Final Estimate:
LLH: 2116.084 norm LLH: 2.116084
mu omega alpha1 beta1
-4.759227e-17 1.145404e+00 2.288348e-01 5.085949e-01
run3
DGP
0.4/?? 0.8 0.7
gjrgarch:
ggres.params [-0.45196579 0.2569641 0.02201904 1.11942636]
rearranged
const/omega ma1/alpha1 ar1/beta1
1.11942636 0.2569641(+0.02201904) 0.45196579
g11:
[ 1.10262688 0.26680468 0.45724957]
-2055.73912687
R:
Final Estimate:
LLH: 2055.738 norm LLH: 2.055738
mu omega alpha1 beta1
-1.665226e-17 1.102396e+00 2.668712e-01 4.573224e-01
fit = r.garchFit(f, data = errgjr4-errgjr4.mean())
rpy.RPy_RException: Error in solve.default(fit$hessian) :
Lapack routine dgesv: system is exactly singular
run4
DGP:
mu=0.4, scale=1.01
ma = [[1., 0, 0],[0, 0.8,0.0]], ar = [1.0, -0.7]
maybe something wrong with simulation
gjrgarch
ggres.params [-0.50554663 0.24449867 -0.00521004 1.00796791]
rearranged
1.00796791 0.24449867(-0.00521004) 0.50554663
garch11:
[ 1.01258264 0.24149155 0.50479994]
-2056.3877404
R include_constant=False
Final Estimate:
LLH: 2056.397 norm LLH: 2.056397
omega alpha1 beta1
1.0123560 0.2409589 0.5049154
'''
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
if 'sp500' in examples:
import tabular as tb
import scikits.timeseries as ts
a = tb.loadSV(r'C:\Josef\work-oth\gspc_table.csv')
s = ts.time_series(a[0]['Close'][::-1],
dates=ts.date_array(a[0]['Date'][::-1],freq="D"))
sp500 = a[0]['Close'][::-1]
sp500r = np.diff(np.log(sp500))
#plt.show()
| bsd-3-clause |
rohanp/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution ** X.shape[1]`` (when each column
of ``X`` has at least ``grid_resolution`` unique values).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
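# Illustrative sketch (not a doctest from the library): for a hypothetical
# X_demo = np.random.RandomState(0).rand(100, 2) and grid_resolution=10,
# _grid_from_X(X_demo, grid_resolution=10) builds one 10-point axis per column
# between the 5th and 95th percentiles and returns their cartesian product,
# i.e. a grid with 10 * 10 = 100 rows and 2 columns.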
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
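    # Accumulate the partial dependence over all boosting stages; each tree's
    # contribution is scaled by the learning rate inside
    # _partial_dependence_tree, and one row of ``pdp`` is filled per class k.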
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
Delphine-L/tools-iuc | tools/fsd/fsd_regions.py | 17 | 11924 | #!/usr/bin/env python
# Family size distribution of tags which were aligned to the reference genome
#
# Author: Monika Heinzl & Gundula Povysil, Johannes-Kepler University Linz (Austria)
# Contact: [email protected]
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS,
# a BAM file with tags of reads that overlap the regions of the reference genome and
# an optional BED file with chromosome, start and stop position of the regions as input.
# The program produces a plot which shows the distribution of family sizes of the tags from the input files and
# a tabular file with the data of the plot.
# USAGE: python FSD_regions.py --inputFile filenameSSCS --inputName1 filenameSSCS
# --bamFile DCSbamFile --rangesFile BEDfile --output_tabular outputfile_name_tabular
# --output_pdf outputfile_name_pdf
import argparse
import collections
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pysam
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
with open(file, 'r') as dest_f:
data_array = np.genfromtxt(dest_f, skip_header=0, delimiter=delim, comments='#', dtype=str)
return data_array
def make_argparser():
parser = argparse.ArgumentParser(description='Family Size Distribution of tags which were aligned to regions of the reference genome')
parser.add_argument('--inputFile', help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--bamFile', help='BAM file with aligned reads.')
parser.add_argument('--rangesFile', default=None, help='BED file with chromosome, start and stop positions.')
parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the pdf and tabular file.')
parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the pdf and tabular file.')
return parser
def compare_read_families_refGenome(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
firstFile = args.inputFile
name1 = args.inputName1
name1 = name1.split(".tabular")[0]
bamFile = args.bamFile
rangesFile = args.rangesFile
title_file = args.output_pdf
title_file2 = args.output_tabular
sep = "\t"
with open(title_file2, "w") as output_file, PdfPages(title_file) as pdf:
data_array = readFileReferenceFree(firstFile, "\t")
pysam.index(bamFile)
bam = pysam.AlignmentFile(bamFile, "rb")
qname_dict = collections.OrderedDict()
if rangesFile is not None:
with open(rangesFile, 'r') as regs:
range_array = np.genfromtxt(regs, skip_header=0, delimiter='\t', comments='#', dtype=str)
if range_array.ndim == 0:
print("Error: file has 0 lines")
exit(2)
if range_array.ndim == 1:
chrList = range_array[0]
start_posList = range_array[1].astype(int)
stop_posList = range_array[2].astype(int)
chrList = [chrList.tolist()]
start_posList = [start_posList.tolist()]
stop_posList = [stop_posList.tolist()]
else:
chrList = range_array[:, 0]
start_posList = range_array[:, 1].astype(int)
stop_posList = range_array[:, 2].astype(int)
if len(start_posList) != len(stop_posList):
print("start_positions and end_positions do not have the same length")
exit(3)
chrList = np.array(chrList)
start_posList = np.array(start_posList).astype(int)
stop_posList = np.array(stop_posList).astype(int)
for chr, start_pos, stop_pos in zip(chrList, start_posList, stop_posList):
chr_start_stop = "{}_{}_{}".format(chr, start_pos, stop_pos)
qname_dict[chr_start_stop] = []
for read in bam.fetch(chr, start_pos, stop_pos):
if not read.is_unmapped:
if re.search('_', read.query_name):
tags = re.split('_', read.query_name)[0]
else:
tags = read.query_name
qname_dict[chr_start_stop].append(tags)
else:
for read in bam.fetch():
if not read.is_unmapped:
if re.search(r'_', read.query_name):
tags = re.split('_', read.query_name)[0]
else:
tags = read.query_name
if read.reference_name not in qname_dict:
qname_dict[read.reference_name] = [tags]
else:
qname_dict[read.reference_name].append(tags)
seq = np.array(data_array[:, 1])
tags = np.array(data_array[:, 2])
quant = np.array(data_array[:, 0]).astype(int)
group = np.array(list(qname_dict.keys()))
all_ab = seq[np.where(tags == "ab")[0]]
all_ba = seq[np.where(tags == "ba")[0]]
quant_ab = quant[np.where(tags == "ab")[0]]
quant_ba = quant[np.where(tags == "ba")[0]]
seqDic_ab = dict(zip(all_ab, quant_ab))
seqDic_ba = dict(zip(all_ba, quant_ba))
lst_ab = []
lst_ba = []
quantAfterRegion = []
length_regions = 0
for i in group:
lst_ab_r = []
lst_ba_r = []
seq_mut = qname_dict[i]
if rangesFile is None:
seq_mut, seqMut_index = np.unique(np.array(seq_mut), return_index=True)
length_regions = length_regions + len(seq_mut) * 2
for r in seq_mut:
count_ab = seqDic_ab.get(r)
count_ba = seqDic_ba.get(r)
lst_ab_r.append(count_ab)
lst_ab.append(count_ab)
lst_ba_r.append(count_ba)
lst_ba.append(count_ba)
dataAB = np.array(lst_ab_r)
dataBA = np.array(lst_ba_r)
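            # Clamp family sizes above 20 to 22 so they all land in the single
            # ">20" histogram bin (the last x-tick is relabelled ">20" below).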
bigFamilies = np.where(dataAB > 20)[0]
dataAB[bigFamilies] = 22
bigFamilies = np.where(dataBA > 20)[0]
dataBA[bigFamilies] = 22
quantAll = np.concatenate((dataAB, dataBA))
quantAfterRegion.append(quantAll)
quant_ab = np.array(lst_ab)
quant_ba = np.array(lst_ba)
maximumX = np.amax(np.concatenate(quantAfterRegion))
minimumX = np.amin(np.concatenate(quantAfterRegion))
# PLOT
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['patch.edgecolor'] = "black"
fig = plt.figure()
plt.subplots_adjust(bottom=0.3)
colors = ["#6E6E6E", "#0431B4", "#5FB404", "#B40431", "#F4FA58", "#DF7401", "#81DAF5"]
col = []
for i in range(0, len(group)):
col.append(colors[i])
counts = plt.hist(quantAfterRegion, bins=range(minimumX, maximumX + 1), stacked=False, label=group,
align="left", alpha=1, color=col, edgecolor="black", linewidth=1)
ticks = np.arange(minimumX - 1, maximumX, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(np.array(ticks), ticks1)
count = np.bincount([int(_) for _ in quant_ab]) # original counts
legend = "max. family size:\nabsolute frequency:\nrelative frequency:\n\ntotal nr. of reads:\n(before SSCS building)"
plt.text(0.15, 0.085, legend, size=11, transform=plt.gcf().transFigure)
legend = "AB\n{}\n{}\n{:.5f}\n\n{:,}".format(max(map(int, quant_ab)), count[len(count) - 1], float(count[len(count) - 1]) / sum(count), sum(np.array(data_array[:, 0]).astype(int)))
plt.text(0.35, 0.105, legend, size=11, transform=plt.gcf().transFigure)
count2 = np.bincount([int(_) for _ in quant_ba]) # original counts
legend = "BA\n{}\n{}\n{:.5f}" \
.format(max(map(int, quant_ba)), count2[len(count2) - 1], float(count2[len(count2) - 1]) / sum(count2))
plt.text(0.45, 0.1475, legend, size=11, transform=plt.gcf().transFigure)
plt.text(0.55, 0.2125, "total nr. of tags:", size=11, transform=plt.gcf().transFigure)
plt.text(0.8, 0.2125, "{:,} ({:,})".format(length_regions, length_regions / 2), size=11,
transform=plt.gcf().transFigure)
legend4 = "* In the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n"
plt.text(0.1, 0.01, legend4, size=11, transform=plt.gcf().transFigure)
space = 0
for i, count in zip(group, quantAfterRegion):
plt.text(0.55, 0.15 - space, "{}:\n".format(i), size=11, transform=plt.gcf().transFigure)
plt.text(0.8, 0.15 - space, "{:,}\n".format(len(count) / 2), size=11, transform=plt.gcf().transFigure)
space = space + 0.02
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.xlabel("Family size", fontsize=14)
plt.ylabel("Absolute Frequency", fontsize=14)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
plt.margins(0.01, None)
pdf.savefig(fig, bbox_inch="tight")
plt.close()
output_file.write("Dataset:{}{}\n".format(sep, name1))
output_file.write("{}AB{}BA\n".format(sep, sep))
output_file.write("max. family size:{}{}{}{}\n".format(sep, max(map(int, quant_ab)), sep, max(map(int, quant_ba))))
output_file.write("absolute frequency:{}{}{}{}\n".format(sep, count[len(count) - 1], sep, count2[len(count2) - 1]))
output_file.write("relative frequency:{}{:.3f}{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count), sep, float(count2[len(count2) - 1]) / sum(count2)))
output_file.write("total nr. of reads{}{}\n".format(sep, sum(np.array(data_array[:, 0]).astype(int))))
output_file.write("total nr. of tags{}{} ({})\n".format(sep, length_regions, length_regions / 2))
output_file.write("\n\nValues from family size distribution\n")
output_file.write("{}".format(sep))
for i in group:
output_file.write("{}{}".format(i, sep))
output_file.write("\n")
j = 0
for fs in counts[1][0:len(counts[1]) - 1]:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
if len(group) == 1:
output_file.write("{}{}".format(int(counts[0][j]), sep))
else:
for n in range(len(group)):
output_file.write("{}{}".format(int(counts[0][n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
if len(group) == 1:
output_file.write("{}{}".format(int(sum(counts[0])), sep))
else:
for i in counts[0]:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("\n")
output_file.write("\n\nIn the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n")
output_file.write("Region{}total nr. of tags per region\n".format(sep))
for i, count in zip(group, quantAfterRegion):
output_file.write("{}{}{}\n".format(i, sep, len(count) / 2))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families_refGenome(sys.argv))
| mit |
yuyuz/FLASH | benchmarks/sklearn/cache.py | 1 | 5960 | from sklearn.datasets import load_svmlight_file, dump_svmlight_file
from sklearn.cross_validation import KFold
import logging
import numpy as np
import hashlib
import os.path
import re
import time
logging.basicConfig(filename='cache.log', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("flash.cache")
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
_n_fold = 3 # hard code cross validated folds
_random_state = 41
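# Caching scheme (summarised from the code below, not from external docs):
# every pipeline step gets a string identifier built from its class name and
# parameters, identifiers are chained per step and per CV fold, hashed with
# sha224, and the transformed train/test folds are dumped to <hash>.train /
# <hash>.test in svmlight format. Only the deterministic preprocessing steps
# whitelisted in _step_should_cache are ever cached.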
def cached_run(steps, X, y):
step_identifier = ''
# split data
n = len(y)
kf = KFold(n, _n_fold, random_state=_random_state)
folded_data = [(X[train_index], y[train_index], X[test_index], y[test_index]) for train_index, test_index in kf]
# last step is estimator, handle separately
for step in steps[:-1]:
step_identifier += "/%s" % _step_identifier(step)
logger.info("Processing %s", step_identifier)
folded_data = run_step_on_demand(step_identifier, step, folded_data)
scores = []
estimator = steps[-1]
step_identifier += "/%s" % _step_identifier(estimator)
for (X_train, y_train, X_test, y_test) in folded_data:
estimator.fit(X_train, y_train)
scores.append(estimator.score(X_test, y_test))
score = np.mean(scores)
logger.info("score of %s is %r", step_identifier, score)
return score
def run_step_on_demand(step_identifier, step, folded_data): #X_train, y_train, X_test, y_test
# Checkout from cache first
should_cache = _step_should_cache(step_identifier)
logger.info("Step %s should cache = %r", step_identifier, should_cache)
if should_cache:
res_cached = check_step_res_exist(step_identifier)
if res_cached:
logger.info("Cached hit for step %s", step_identifier)
starttime = time.time()
res = [load_step_res(_step_fold_identifier(step_identifier, fold)) for fold in range(_n_fold)]
duration = time.time() - starttime
logger.info("load cache with %f seconds", duration)
return res
logger.info("Cache missed for step %s. Calculating...", step_identifier)
starttime = time.time()
res = []
for (fold, (X_train, y_train, X_test, y_test)) in enumerate(folded_data):
X_train, y_train, X_test, y_test = run_step_fold(step, X_train, y_train, X_test, y_test)
if should_cache:
save_step_res(_step_fold_identifier(step_identifier, fold), X_train, y_train, X_test, y_test)
res.append((X_train, y_train, X_test, y_test))
duration = time.time() - starttime
logger.info("finished step %s running in %f seconds", step_identifier, duration)
return res
def run_step_fold(step, X_train, y_train, X_test, y_test):
X_train = step.fit_transform(X_train, y_train)
X_test = step.transform(X_test)
return (X_train, y_train, X_test, y_test)
def save_step_res(step_fold_identifier, X_train, y_train, X_test, y_test):
file_name_base = hashlib.sha224(step_fold_identifier).hexdigest()
logger.info("Saving [%s] to [%s]", step_fold_identifier, file_name_base)
with open(file_name_base + ".train", "wb") as f:
dump_svmlight_file(X_train, y_train, f)
with open(file_name_base + ".test", "wb") as f:
dump_svmlight_file(X_test, y_test, f)
def load_step_res(step_fold_identifier):
file_name_base = hashlib.sha224(step_fold_identifier).hexdigest()
logger.info("loading [%s] from [%s]", step_fold_identifier, file_name_base)
with open(file_name_base + ".train", "rb") as f:
X_train, y_train = load_svmlight_file(f)
X_train = X_train.toarray()
with open(file_name_base + ".test", "rb") as f:
X_test, y_test = load_svmlight_file(f)
X_test = X_test.toarray()
return (X_train, y_train, X_test, y_test)
def check_step_res_exist(step_identifier):
return all(check_step_fold_res_exist(_step_fold_identifier(step_identifier, fold)) for fold in range(_n_fold))
def check_step_fold_res_exist(step_fold_identifier):
file_name_base = hashlib.sha224(step_fold_identifier).hexdigest()
logger.debug("checking %s", file_name_base)
existence = os.path.isfile(file_name_base + ".test")
logger.debug("%s existence = %r", file_name_base, existence)
return existence
def _step_fold_identifier(step_identifier, fold):
return '/' + str(fold) + step_identifier
def _step_identifier(step):
def param_value_to_string(value):
if hasattr(value, '__call__'):
return value.__name__
return value
return type(step).__name__ + '=' + '&'.join(['%s:%s' %(k, param_value_to_string(v)) for k,v in step.get_params().items()])
def _step_should_cache(step_identifier):
# TODO: check param and decide, a smarter way should be employed here
def step_cache(name):
logger.info("checking %s", name)
return name in ['MaxAbsScaler', 'MinMaxScaler', 'StandardScaler', 'Normalizer', 'PolynomialFeatures']
return all(step_cache(name) for name in re.findall(r'(\w+)=', step_identifier))
def main():
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import MinMaxScaler
X, y = samples_generator.make_classification(n_samples=1000, n_informative=5, n_redundant=4, random_state=_random_state)
anova_filter = SelectKBest(f_regression, k=5)
scaler = MinMaxScaler()
clf = svm.SVC(kernel='linear')
steps = [scaler, anova_filter, clf]
cached_run(steps, X, y)
if __name__ == '__main__':
main()
| gpl-3.0 |
vighneshbirodkar/scikit-image | doc/examples/features_detection/plot_shape_index.py | 5 | 4382 | """
===========
Shape Index
===========
The shape index is a single valued measure of local curvature,
derived from the eigen values of the Hessian,
defined by Koenderink & van Doorn [1]_.
It can be used to find structures based on their apparent local shape.
The shape index maps to values from -1 to 1,
representing different kind of shapes (see the documentation for details).
In this example, a random image with spots is generated,
which should be detected.
A shape index of 1 represents 'spherical caps',
the shape of the spots we want to detect.
The leftmost plot shows the generated image, the center shows a 3D render
of the image, taking intensity values as height of a 3D surface,
and the right one shows the shape index (s).
As visible, the shape index readily amplifies the local shape of noise as well,
but is insusceptible to global phenomena (e.g. uneven illumination).
The blue and green marks are points which deviate no more than 0.05
from the desired shape. To attenuate noise in the signal, the
green marks are taken from the shape index (s)
after another Gaussian blur pass (yielding s').
Note how spots interjoined too closely are *not* detected,
as they do not possess the desired shape.
.. [1] Koenderink, J. J. & van Doorn, A. J.,
"Surface shape and curvature scales",
Image and Vision Computing, 1992, 10, 557-564.
DOI:10.1016/0262-8856(92)90076-F
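For reference, the shape index of Koenderink & van Doorn is commonly written as
s = (2 / pi) * arctan((k2 + k1) / (k2 - k1)), where k1 <= k2 are the principal
curvatures derived from the Hessian (see the ``skimage.feature.shape_index``
documentation for the exact convention used in this example).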
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import ndimage as ndi
from skimage.feature import shape_index
from skimage.draw import circle
def create_test_image(
image_size=256, spot_count=30, spot_radius=5, cloud_noise_size=4):
"""
Generate a test image with random noise, uneven illumination and spots.
"""
state = np.random.get_state()
np.random.seed(314159265) # some digits of pi
image = np.random.normal(
loc=0.25,
scale=0.25,
size=(image_size, image_size)
)
for _ in range(spot_count):
rr, cc = circle(
np.random.randint(image.shape[0]),
np.random.randint(image.shape[1]),
spot_radius,
shape=image.shape
)
image[rr, cc] = 1
image *= np.random.normal(loc=1.0, scale=0.1, size=image.shape)
image *= ndi.zoom(
np.random.normal(
loc=1.0,
scale=0.5,
size=(cloud_noise_size, cloud_noise_size)
),
image_size / cloud_noise_size
)
np.random.set_state(state)
return ndi.gaussian_filter(image, sigma=2.0)
# First create the test image and its shape index
image = create_test_image()
s = shape_index(image)
# In this example we want to detect 'spherical caps',
# so we threshold the shape index map to
# find points which are 'spherical caps' (~1)
target = 1
delta = 0.05
point_y, point_x = np.where(np.abs(s - target) < delta)
point_z = image[point_y, point_x]
# The shape index map relentlessly produces the shape, even that of noise.
# In order to reduce the impact of noise, we apply a Gaussian filter to it,
# and show the results once in
s_smooth = ndi.gaussian_filter(s, sigma=0.5)
point_y_s, point_x_s = np.where(np.abs(s_smooth - target) < delta)
point_z_s = image[point_y_s, point_x_s]
fig = plt.figure(figsize=(24, 8))
ax1 = fig.add_subplot(1, 3, 1)
ax1.imshow(image, cmap=plt.cm.gray)
ax1.axis('off')
ax1.set_title('Input image', fontsize=18)
scatter_settings = dict(alpha=0.75, s=10, linewidths=0)
ax1.scatter(point_x, point_y, color='blue', **scatter_settings)
ax1.scatter(point_x_s, point_y_s, color='green', **scatter_settings)
ax2 = fig.add_subplot(1, 3, 2, projection='3d', sharex=ax1, sharey=ax1)
x, y = np.meshgrid(
np.arange(0, image.shape[0], 1),
np.arange(0, image.shape[1], 1)
)
ax2.plot_surface(x, y, image, linewidth=0, alpha=0.5)
ax2.scatter(
point_x,
point_y,
point_z,
color='blue',
label='$|s - 1|<0.05$',
**scatter_settings
)
ax2.scatter(
point_x_s,
point_y_s,
point_z_s,
color='green',
label='$|s\' - 1|<0.05$',
**scatter_settings
)
ax2.legend()
ax2.axis('off')
ax2.set_title('3D visualization')
ax3 = fig.add_subplot(1, 3, 3, sharex=ax1, sharey=ax1)
ax3.imshow(s, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title(r'Shape index, $\sigma=1$', fontsize=18)
fig.tight_layout()
plt.show()
| bsd-3-clause |
iamutkarshtiwari/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
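    # Illustrative sketch (hypothetical class names, not part of sympy): a
    # concrete ket that knows its inner product with a particular bra class
    # would implement the dispatch hook described above, e.g.
    #
    #     class MyKet(KetBase):
    #         def _eval_innerproduct_MyBra(self, bra, **hints):
    #             return Integer(1)  # sympy Integer
    #
    # so that _eval_innerproduct can dispatch to it by the bra's class name.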
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Ket and looking at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
                ct += 1
                continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
        limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
        Return the square of the absolute magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| bsd-3-clause |
jhavstad/model_runner | src/ModelRunnerPlots.py | 1 | 25806 | from ggplot import *
import pandas as pd
import logging
import numpy as np
import matplotlib.pyplot as plt
def get_model(name):
try:
return name[0:name.index('_')]
except:
pass
return name
def get_era(name):
try:
firstIndex = name.index('_')
return name[firstIndex+1:name.index('_', firstIndex+1)]
except:
pass
return name
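# Illustrative note (assumed column naming): the data frame columns are expected
# to look like '<model>_<era>_<suffix>', so that for a hypothetical column named
# 'CCSM4_historical_tmax', get_model(...) == 'CCSM4' and get_era(...) == 'historical'.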
def get_col_intersection(df1, df2):
    # Build a new list of the columns common to both data frames; removing
    # entries from the sequence being iterated over would skip elements, and
    # a pandas Index has no remove() method.
    return [col for col in df1.columns if col in df2.columns]
def get_model_diffs(df1, df2, historic_start_year, historic_end_year, future_start_year, future_end_year, index_column):
'''
    Organize the incoming data frames so that the historic value range can be
    subtracted from the future value range. For every model that has both a
    historic and a future column, the values within the two year ranges are
    flattened and subtracted element-wise (future minus historic), so the two
    ranges should span the same number of years. The result is returned for
    both data frames, which represent the X and Y plot axes respectively.
    The output is a tuple of two dictionaries, one per data frame, each mapping
    a model name to an np.array of differences.
'''
# Attempt to get the columns that are common between the two data frames.
intersection_columns = get_col_intersection(df1, df2)
# Create a listing of all the models that are common between the two data
# frames (provided above) and then filter out those that do not have
# both a future and historic values.
models = dict()
historic_value = 'historical'
historic_key = 'historic'
future_key = 'future'
for col in intersection_columns:
model = get_model(col)
era = get_era(col)
if model not in models:
models[model] = dict()
#print("era: " + era)
if era == historic_value:
models[model][historic_key] = col
else:
models[model][future_key] = col
# Return the differences between the historic and future values
# for the two data frames.
df1_diffs = _get_model_diffs(df1, models, historic_start_year, historic_end_year, future_start_year, future_end_year, historic_value, historic_key, future_key, index_column)
df2_diffs = _get_model_diffs(df2, models, historic_start_year, historic_end_year, future_start_year, future_end_year, historic_value, historic_key, future_key, index_column)
return (df1_diffs, df2_diffs)
def _get_model_diffs(df, models, historic_start_year, historic_end_year, future_start_year, future_end_year, historic_value, historic_key, future_key, index_column):
#print("Retrieving values for data frame: " + str(models))
#print("Historic interval (years): " + str(historic_end_year - historic_start_year))
#print("Future interval (years): " + str(future_end_year - future_start_year))
output = dict()
for model in models:
#print("Model in models: " + str(models[model]))
if historic_key in models[model] and future_key in models[model]:
#print("Calculating difference for model: " + model);
df_historic = models[model][historic_key]
df_future = models[model][future_key]
df_historic = df[df_historic]
df_future = df[df_future]
df_historic = df_historic[df[index_column] >= historic_start_year]
df_historic = df_historic[df[index_column] <= historic_end_year]
df_future = df_future[df[index_column] >= future_start_year]
df_future = df_future[df[index_column] <= future_end_year]
df_historic_values = df_historic.values.flatten()
df_future_values = df_future.values.flatten()
#print(str(len(df_future_values)))
#print(str(len(df_historic_values)))
output[model] = df_future_values - df_historic_values
return output
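# Minimal usage sketch (hypothetical variable names): given two data frames read
# from a temperature and a precipitation file, each with a 'year' column plus
# '<model>_historical_*' and '<model>_<future-era>_*' value columns, per-model
# future-minus-historic differences could be obtained with, e.g.:
#
#     temp_diffs, precip_diffs = get_model_diffs(
#         df_temp, df_precip, 1950, 1999, 2050, 2099, index_column='year')
#
# Both year ranges should span the same number of years, because the flattened
# value arrays are subtracted element-wise.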
def get_model_sub_strings(column):
sub1 = column[0: column.index('_')]
sub2 = column[column.rindex('_')+1:len(column)]
return (sub1, sub2)
def merge_columns(df):
col_search = df.columns - ['year']
col = col_search[0]
match_cols = list()
while len(col_search) > 1:
(sub1, sub2) = get_model_sub_strings(col)
col_search = col_search - [col]
for next_col in col_search:
(sub1_next, sub2_next) = get_model_sub_strings(next_col)
if sub1 == sub1_next and sub2 == sub2_next:
match_cols.append([col, next_col, sub1, sub2])
col_search = col_search - [next_col]
if len(col_search) > 0:
col = col_search[0]
df_new = df['year']
for match_col in match_cols:
df_col = df[match_col[0]].fillna(df[match_col[1]])
df_col.name = match_col[2] + '_' + match_col[3]
df_new = pd.concat([df_new, df_col], axis=1, join_axes=[df.index])
return df_new
def get_closest_fit(df, datatype):
'''
    Find the value column that is the closest fit to the average of all the value columns.
    Input: the data frame with all the merged model columns and a 'year' column.
    Returns the data frame with missing values filled by the row-wise average and an
    'average' column appended.
'''
# Extract the values columns
columns = df.columns - ['year']
df_values = df[columns]
# Create a data frame that contains the average for a row
# axis=1 to compute the averages along left-right(?) axis, versus the up-down(?) axis
averages = df_values.mean(axis=1)
# Fill in the N/A values with the average using
df_new = pd.DataFrame(df['year'], columns=['year'], index=df.index)
for col in df_values.columns:
df_replace = df_values[col]
df_replace.name = col
df_replace = df_replace.fillna(averages)
# NOTE: For some versions of pandas, a DataFrame must be created explicitly before a concat
# In this case, this object, prior to creation as DataFrame, is created as a Series
df_replace = pd.DataFrame(df_replace, columns=[col], index=df_new.index)
df_new = pd.concat([df_new, df_replace], axis=1, join_axes=[df_values.index])
# Pick the first, arbitrarily, and then try to beat it
best_col = columns[0]
best_diff = abs(df_new[columns[0]] - averages).sum()
for col in columns:
        diff = abs(df_new[col] - averages).sum()
if diff < best_diff:
best_diff = diff
best_col = col
# NOTE: For some versions of pandas, a DataFrame must be created explicitly before a concat
# In this case, this object, prior to creation as DataFrame, is created as a Series
df_averages = averages
#df_averages.name = 'Average ' + datatype + ' across all models'
df_averages.name = 'average'
df_averages = pd.DataFrame(df_averages, columns=[df_averages.name], index=df_new.index)
df_closest_fit = df_new
df_closest_fit = pd.concat([df_closest_fit, df_averages], axis=1, join_axes=[df_new.index])
return df_closest_fit
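# Usage sketch (assumes a 'year' column plus one value column per model run):
#
#     df_with_avg = get_closest_fit(df, 'Temperature')
#
# df_with_avg carries every original value column with NaNs filled by the
# row-wise mean, plus an 'average' column that find_avg_dataframe looks for.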
# This function creates a data frame that also includes the 'average' of all the other data
def create_dataframes(filename, datatype, log):
df = None
df_closest_fit = None
try:
# Read a tab delimited file.
df = pd.read_table(filename, engine='c', lineterminator='\n', na_values=[''])
# NOTE: This merges historic with future, but this is likely uncessary and may even be wrong.
# However, this done merely for convenience. Please remember to adjust this later. It will still
# be important to format the data frame column labels properly, and the correct method may be to
# create seperate data frames with adjusted column labels.
#df = merge_columns(df)
# Get the data frame that is the closest fit (may not be the best) based upon the averages of the value columns
# in the data frame
df_closest_fit = get_closest_fit(df, datatype)
except IOError as ioe:
if log:
logging.error(str(ioe))
return None
return df_closest_fit
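# Usage sketch (hypothetical file name): create_dataframes('tmax.tsv', 'Temperature', log)
# reads a tab-delimited file with a 'year' column and per-model value columns and
# returns the result of get_closest_fit on it, or None if the file cannot be read.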
def find_avg_dataframe(df, log=None, value_vars=list()):
try:
avg_col = None
for col in df.columns:
if 'average' in str(col):
avg_col = col
if avg_col != None:
df_avg = pd.melt(df, id_vars=['year'], value_vars=[avg_col])
if len(value_vars) == 0:
all_columns = list()
for col in df.columns:
all_columns.append(col)
all_columns.remove(avg_col)
all_columns.remove('year')
value_vars = all_columns
else:
if avg_col in value_vars:
value_vars.remove(avg_col)
df_lng = pd.melt(df, id_vars=['year'], value_vars=value_vars)
print("Found average dataframe")
return df_avg, df_lng
except KeyError as ke:
if log:
logging.error(str(ke))
else:
print("Could not find average dataframe")
return pd.DataFrame(), pd.DataFrame()
def get_avg_plot(plot_title, y_label, df, log):
#print('Creating plot for just average dataframe.')
df_avg, df_lng = find_avg_dataframe(df, None)
if len(df_avg) == 0:
print('Could not find average dataframe!')
else:
print(df_avg)
plot = ggplot(aes(x='year', y='value', color='variable'), data=df_avg)
plot += geom_point(data=df_avg)
plot += ggtitle(plot_title)
plot += xlab('Year')
plot += ylab(y_label)
fig = plot.draw()
return fig
def create_line_plot(plot_title, y_label, df, log, value_vars=list()):
#variable_colors = dict()
#colors = ['red', 'blue', 'green', 'orange', 'yellow', 'purple', 'black', 'cyan']
#colors_to_hex = { 'red': '#FF0000', 'blue': '#00000FF', 'green': '#00FF00', 'orange': '#CC79A7', 'yellow': '#AAAA00', 'purple': '#AA00AA', 'black': '#FFFFFF', 'cyan': '#00AAFF' }
#colors_to_col = dict()
#color_index = 0
#for col in df.columns:
#if col != 'year':
#variable_colors[col] = colors[color_index % len(colors)]
#colors_to_col[colors[color_index % len(colors)]] = col
#color_index += 1
# Transform the columns into id, variable, and values columns, using the year column as the id
df_lng = None
try:
df_aes_basis = pd.melt(df, id_vars=['year'])
df_lng = pd.melt(df, id_vars=['year'], value_vars=value_vars)
except KeyError as ke:
if log:
logging.error(str(ke))
return None
#df_avg, df_lng = find_avg_dataframe(df, log, value_vars)
#if len(df_avg) == 0 or len(df_lng) == 0:
#return None
#color_list = list()
#for row_index, row in df_lng.iterrows():
# color_list.append(variable_colors[row.variable])
#
#df_colors = pd.DataFrame(color_list, index=df_lng.index, columns=['color_mapping'])
#df_lng = pd.concat([df_lng, df_colors], axis=1, join_axes=[df_lng.index])
#
plot = ggplot(aes(x='year', y='value', color='variable'), data=df_lng)
#plot.add_to_legend(legend_type='color', legend_dict=colors_to_col)
#print plot.data._get_numeric_data().columns
#selected_color_list = list()
#for col in value_vars:
#selected_color_list.append(variable_colors[col])
#plot.manual_color_list = selected_color_list
#data_assigned_visual_mapping = assign_visual_mapping(data=df_aes_basis, aes=aes(x='year', y='value', color='variable'), gg=plot)
#print data_assigned_visual_mapping
plot += geom_line(aes(x='year', y='value', color='variable'), data=df_lng)
plot += ggtitle(plot_title)
plot += xlab('Year')
plot += ylab(y_label)
fig = plot.draw()
return fig
# NOTE: This is deprecated
def get_range_values(dframe, min_index_value1, max_index_value1, min_index_value2, max_index_value2, index_column):
'''
    Retrieve the mean of the values whose index_column falls between min_index_value1 and
    max_index_value1 and the mean of those between min_index_value2 and max_index_value2,
    and return their per-column difference (first range minus second) as a one-row data frame.
    Even though the minimum and maximum index values are ordered appropriately, the differences
    may be less than zero because the values in the second range may exceed those in the first.
'''
# The following lines are to determine if a frame is null or not.
# The pandas library does not equate a data frame with None unless
# it is actually none. Hence, the try...except block
dframe_is_none = False
try:
if dframe == None:
dframe_is_none = True
except:
pass
# If the minimum values is greater than the maximum value then swap the two.
if min_index_value1 > max_index_value1:
temp = min_index_value1
min_index_value1 = max_index_value1
max_index_value1 = temp
if min_index_value2 > max_index_value2:
temp = min_index_value2
min_index_value2 = max_index_value2
max_index_value2 = temp
# Attempt to find the min and max values in the first data range.
if not dframe_is_none:
df1_values = dframe[dframe[index_column] >= min_index_value1]
        df1_values = df1_values[df1_values[index_column] <= max_index_value1]
upper_bound_values = df1_values.mean()
#print(str(df1_mean_values))
        # Attempt to find the min and max values in the second data range.
df2_values = dframe[dframe[index_column] >= min_index_value2]
        df2_values = df2_values[df2_values[index_column] <= max_index_value2]
lower_bound_values = df2_values.mean()
#print(str(df2_mean_values))
diff_values = pd.DataFrame(np.ones((1, len(dframe.columns))), columns=dframe.columns)
for col in diff_values.columns - [index_column]:
diff_values[col] = float(upper_bound_values[col]) - float(lower_bound_values[col])
return diff_values
def get_data_without_extremes(data, percent):
if percent > 0.5:
return None
data_without_extremes = dict()
for model in data:
#print("Reading model " + model)
index_min = int(round(float(len(data[model])) * percent)) - 1
index_max = len(data[model]) - index_min
#print("Index min: " + str(index_min))
#print("Index max: " + str(index_max))
values_array = np.array(data[model])
values_array.sort()
cutoff_min = values_array[index_min]
cutoff_max = values_array[index_max]
#print("Cutoff min: " + str(cutoff_min))
#print("Cutoff max: " + str(cutoff_max))
data_without_extremes[model] = data[model].copy()
# This is an exclusive interval at or below the min_index, or
# anything at or above the max_index is excluded
data_model_mean = data[model].mean()
#print("The model mean is: " + str(data_model_mean))
for i in range(len(data[model])):
if data[model][i] < cutoff_min:
#print("Found a value at the minimum cutoff value")
data_without_extremes[model][i] = data_model_mean
#print("Value at index " + str(i) + " is " + str(data_without_extremes[model][i]))
#print("Original value is " + str(data[model][i]))
if data[model][i] > cutoff_max:
#print("Found a value at the maximum cutoff value")
data_without_extremes[model][i] = data_model_mean
#print("Value at index " + str(i) + " is " + str(data_without_extremes[model][i]))
#print("Original value is " + str(data[model][i]))
return data_without_extremes
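# Behavioural note: with a given percent, values strictly below the lower
# percentile cutoff or strictly above the upper cutoff in each model's array
# are replaced by that model's mean, so the array lengths are unchanged.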
# This function is deprecated
def get_values_without_extremes(df, x_percent, index_column):
'''
    This searches through a data frame and replaces the values that fall in the top and bottom
    x_percent of each column with that column's mean. The structure of the input data frame is
    left intact, which simplifies accessing it later on; only the extreme values are replaced.
    This returns the resulting data frame.
'''
# If the desired percent is 50% then that would leave nothing to search!
if x_percent > 0.5:
return None
data_columns = df.columns - [index_column]
num_x_percent = int(round(len(df) * x_percent))
for col in data_columns:
values_array = np.array(df[col])
values_array.sort()
cutoff_min = values_array[num_x_percent]
cutoff_max = values_array[len(df[col]) - num_x_percent - 1]
# Replace the values that are less than the minimum cutoff value or greater than the maximum cutoff value
df[col] = df[col].replace(df[col][df[col] < cutoff_min], df[col].mean())
df[col] = df[col].replace(df[col][df[col] > cutoff_max], df[col].mean())
return df
def test_func():
print("Testing - is this here")
def create_temp_vs_precip_scatter_plot_r2(plot_title, temp_diffs, precip_diffs, x_percent, historic_start_year, historic_end_year, future_start_year, future_end_year):
    '''
    This creates a temperature vs. precipitation scatter plot. A bounding rectangle is drawn
    around the points using percentile cutoffs, so values outside the percentile range fall
    outside the box.
    Note, temp_diffs and precip_diffs are per-model dictionaries of future-minus-historic
    differences, as produced by get_model_diffs from a temperature and a precipitation file respectively.
'''
#temp_diffs_without_extremes = get_data_without_extremes(temp_diffs, x_percent)
#temp_diffs_min_max = np.percentile(temp_diffs.values, [x_percent * 100, 100 - x_percent * 100])
#precip_diffs_min_max = np.percentil(precip_diffs.values, [x_perecent * 100, 100 - x_percent * 100])
#precip_diffs_without_extremes = get_data_without_extremes(precip_diffs, x_percent)
data_rows = list()
#model_medians = [np.ones(len(temp_diffs)), np.ones(len(precip_diffs))]
model_index = 0
    for model in temp_diffs:
        # One row per (temperature, precipitation) pair so that the data frame
        # holds scalar values rather than one whole array per model.
        for temp_value, precip_value in zip(temp_diffs[model], precip_diffs[model]):
            data_rows.append([model, temp_value, precip_value])
    df_data = pd.DataFrame(data_rows, columns=['Model', 'Temperature', 'Precipitation'])
print(df_data.head())
#for model in temp_diffs:
# So the calculation should be for all the years for all the models
# The current calculation is taking the average
#median_index = round(float(len(temp_diffs[model])) / 2.0)
#model_column = np.array([model for i in range(len(temp_diffs[model]))])
#print(model_column)
#table = [model_column, temp_diffs[model], precip_diffs[model]]
#print(table)
#rows = list()
#data_rows.append(table[0:10])
#temp_median = temp_diffs[model][median_index]
#precip_median = precip_diffs[model][median_index]
#model_medians[0][model_index] = temp_median
#model_medians[1][model_index] = precip_median
#model_index += 1
#median_index = round(float(len(temp_diffs)) / 2.0)
#model_medians[0].sort()
#model_medians[1].sort()
#data_rows.append(['median', model_medians[0][median_index], model_medians[1][median_index]])
#print(table[0:10])
x_column = 'Temperature'
y_column = 'Precipitation'
color_column = 'Model'
#df_data = pd.DataFrame(pd.concat([df_temp, df_precip]), columns=[color_column, x_column, y_column])
#df_data = pd.melt(df_data, id_vars=["Model"], value_vars=["Temperature", "Precipitation"])
#print(df_data.columns)
#print(df_data["Model"].head())
plot = ggplot(aes(x=x_column, y=y_column), data=df_data)
plot += geom_point()
xmax = None
xmin = None
ymax = None
ymin = None
xmin_candidates = []
xmax_candidates = []
ymin_candidates = []
ymax_candidates = []
for model in temp_diffs:
# Copy data into alternate data frames so that the values may
# be changed without affecting the original data frames
temp_values_sorted = temp_diffs[model]
precip_values_sorted = precip_diffs[model]
temp_values_sorted.sort()
precip_values_sorted.sort()
percentile_min = x_percent * 100
percentile_max = 100 - x_percent * 100
x_boundaries = np.percentile(temp_diffs[model], [percentile_min, percentile_max])
y_boundaries = np.percentile(precip_diffs[model], [percentile_min, percentile_max])
print(x_boundaries)
print(y_boundaries)
if not np.isnan(x_boundaries[0]):
xmin_candidates.append(x_boundaries[0])
if not np.isnan(x_boundaries[1]):
xmax_candidates.append(x_boundaries[1])
if not np.isnan(y_boundaries[0]):
ymin_candidates.append(y_boundaries[0])
if not np.isnan(y_boundaries[1]):
ymax_candidates.append(y_boundaries[1])
#first_index = 0
#last_index = len(temp_values_sorted) - 1
#if xmax == None or xmax < temp_values_sorted[last_index]:
#xmax = temp_values_sorted[last_index]
#if xmin == None or xmin < temp_values_sorted[first_index]:
#xmin = temp_values_sorted[first_index]
#if ymax == None or ymax < precip_values_sorted[last_index]:
#ymax = precip_values_sorted[last_index]
#if ymin == None or ymin < precip_values_sorted[last_index]:
#ymin = precip_values_sorted[first_index]
x_max = max(xmax_candidates)
x_min = min(xmin_candidates)
y_max = max(ymax_candidates)
y_min = min(ymin_candidates)
# Now, attempt to find the points that are closest to the xmin, xmax, ymin, ymax
#print("Median temp: " + str(model_medians[0][median_index]))
#print("Median precip: " + str(model_medians[1][median_index]))
print("Min temp: " + str(x_min))
print("Max temp: " + str(x_max))
print("Min precip: " + str(y_min))
print("Max precip: " + str(y_max))
plot += geom_rect(aes(xmin=x_min, xmax=x_max, ymin=y_min, ymax=y_max, fill='#00ff00', alpha=0.05))
plot += geom_vline(aes(xintercept=x_min, ymin=y_min, ymax=y_max, color="#000000", linetype='solid'))
plot += geom_vline(aes(xintercept=x_max, ymin=y_min, ymax=y_max, color="#000000", linetype='solid'))
plot += geom_hline(aes(xmin=x_min, xmax=x_max, yintercept=y_min, color="#000000", linetype='solid'))
plot += geom_hline(aes(xmin=x_min, xmax=x_max, yintercept=y_max, color="#000000", linetype='solid'))
# Set up plot details
plot += ggtitle(plot_title)
plot += xlab(x_column + ' Farenheit')
plot += ylab('Chance of ' + y_column)
fig = plot.draw()
return fig
def create_temp_vs_precip_scatter_plot(plot_title, df_temp, df_precip, x_percent, min_year1, max_year1, min_year2, max_year2):
'''
    This creates a temperature vs. precipitation plot, with extreme values that fall outside the percentile range
replaced with the mean values of the respective columns. The values of the plot are derived by taking the difference
of the values between the min_year and max_year.
    Note, the temperature and precipitation data frames are the resulting input from a temperature and a precipitation
file respectively.
'''
year_column = 'year'
df_temp_diff = get_range_values(df_temp, min_year1, max_year1, min_year2, max_year2, index_column=year_column)
df_precip_diff = get_range_values(df_precip, min_year1, max_year1, min_year2, max_year2, index_column=year_column)
# Ensure that the values are legitimate
df_temp_is_none = False
try:
        df_temp_is_none = df_temp is None
except:
pass
df_precip_is_none = False
try:
        df_precip_is_none = df_precip is None
except:
pass
if df_temp_is_none or len(df_temp) <= 0:
return None
if df_precip_is_none or len(df_precip) <= 0:
return None
df_temp_without_extremes = get_values_without_extremes(df_temp_diff, x_percent, index_column=year_column)
df_precip_without_extremes = get_values_without_extremes(df_precip_diff, x_percent, index_column=year_column)
#print('Temp diffs: ' + str(df_temp_without_extremes))
#print('Precip diffs: ' + str(df_precip_without_extremes))
data_columns = df_temp_without_extremes.columns - [year_column]
data_rows = list()
for col in data_columns:
row = [col, df_temp_diff[col][0], df_precip_diff[col][0]]
data_rows.append(row)
# Add the median point
data_rows.append(['median', df_temp_diff.median().median(), df_precip_diff.median().median()])
x_column = 'Temperature'
y_column = 'Precipitation'
color_column = 'Model'
df_data = pd.DataFrame(data_rows, columns=[color_column, x_column, y_column])
plot = ggplot(aes(x=x_column, y=y_column, color=color_column), data=df_data)
# Add scatter plot (geom_point)
plot += geom_point()
# The minimum and maximum values may contain multiple columns. This process keeps the data for each model
# contained, for the moment.
xmax = df_temp_without_extremes.max().max()
xmin = df_temp_without_extremes.min().min()
ymax = df_precip_without_extremes.max().max()
ymin = df_precip_without_extremes.min().min()
#print('xmin: ' + str(xmin) + ' xmax: ' + str(xmax) + ' ymin: ' + str(ymin) + ' ymax: ' + str(ymax))
# Plot a bounding rectangle that defines the maximum and minimum differences, excluding the extreme values.
plot += geom_rect(aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill='#ff0000', alpha=0.005))
plot += geom_vline(aes(x=xmin, ymin=ymin, ymax=ymax, linetype='solid'))
plot += geom_vline(aes(x=xmax, ymin=ymin, ymax=ymax, linetype='solid'))
plot += geom_hline(aes(xmin=xmin, xmax=xmax, y=ymin, linetype='solid'))
plot += geom_hline(aes(xmin=xmin, xmax=xmax, y=ymax, linetype='solid'))
# Set up plot details
plot += ggtitle(plot_title)
plot += xlab(x_column + ' Farenheit')
plot += ylab(y_column + ' Millimeters')
fig = plot.draw()
return fig
| gpl-2.0 |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py | 69 | 42525 | """
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
if preferredencoding is None: return unicode(s)
else: return unicode(s, preferredencoding)
class converter:
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s==self.missing: return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s==self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s): return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing, missingval)
def __call__(self, s):
if self.is_missing(s): return self.missingval
return int(s)
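# Example of the converters above: todate('%Y-%m-%d')('2008-01-31') returns
# datetime.date(2008, 1, 31), while todate()('Null') returns None, the default
# missing value.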
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks::
signals = 'eat', 'drink', 'be merry'
def oneat(x):
print 'eat', x
def ondrink(x):
print 'drink', x
callbacks = CallbackRegistry(signals)
ideat = callbacks.connect('eat', oneat)
iddrink = callbacks.connect('drink', ondrink)
#tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError
    callbacks.process('drink', 123)    # will call ondrink
    callbacks.process('eat', 456)      # will call oneat
callbacks.process('be merry', 456) # nothing will be called
callbacks.disconnect(ideat) # disconnect oneat
callbacks.process('eat', 456) # nothing will be called
"""
def __init__(self, signals):
'*signals* is a sequence of valid signals'
self.signals = set(signals)
# callbacks is a dict mapping the signal to a dictionary
# mapping callback id to the callback function
self.callbacks = dict([(s, dict()) for s in signals])
self._cid = 0
def _check_signal(self, s):
'make sure *s* is a valid signal or raise a ValueError'
if s not in self.signals:
signals = list(self.signals)
signals.sort()
raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated
func will be called
"""
self._check_signal(s)
self._cid +=1
self.callbacks[s][self._cid] = func
return self._cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in self.callbacks.items():
try: del callbackd[cid]
except KeyError: continue
else: return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
self._check_signal(s)
for func in self.callbacks[s].values():
func(*args, **kwargs)
class Scheduler(threading.Thread):
"""
Base class for timeout and idle scheduling
"""
idlelock = threading.Lock()
id = 0
def __init__(self):
threading.Thread.__init__(self)
self.id = Scheduler.id
self._stopped = False
Scheduler.id += 1
self._stopevent = threading.Event()
def stop(self):
if self._stopped: return
self._stopevent.set()
self.join()
self._stopped = True
class Timeout(Scheduler):
"""
Schedule recurring events with a wait time in seconds
"""
def __init__(self, wait, func):
Scheduler.__init__(self)
self.wait = wait
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(self.wait)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
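# Usage sketch for Timeout: the callback receives the Timeout instance and
# should return a true value to keep the timer running, e.g.
#
#     def tick(timer):
#         print 'tick'
#         return True      # keep going; return False (or call stop) to end
#     t = Timeout(1.0, tick)
#     t.start()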
class Idle(Scheduler):
"""
Schedule callbacks when scheduler is idle
"""
# the prototype impl is a bit of a poor man's idle handler. It
# just implements a short wait time. But it will provide a
    # placeholder for a proper impl later
waittime = 0.05
def __init__(self, func):
Scheduler.__init__(self)
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(Idle.waittime)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
    homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None: self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove: s = s.replace(r,'')
return s
class Bunch:
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables:
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def unique(x):
'Return a list of unique elements of *x*'
return dict([ (val, 1) for val in x]).keys()
def iterable(obj):
'return true if *obj* is iterable'
try: len(obj)
except: return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, (str, unicode)): return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try: obj + ''
except (TypeError, ValueError): return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj): return False
if is_string_like(obj): return False
for o in obj:
if not is_string_like(o): return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and callable(obj.write)
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try: obj+1
except TypeError: return False
else: return True
def to_filehandle(fname, flag='r', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, flag)
else:
fh = file(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
return is_string_like(val) or not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
"""
this generator flattens nested containers such as
>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])
so that
>>> for i in flatten(l): print i,
John Hunter 1 23 42 5 23
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item): yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter:
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace: data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i],attributename),i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print multiple_replace(adict, text)
xlat = Xlator(adict)
print xlat.xlat(text)
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, self.keys())))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
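# Illustrative sketch (not part of the original module): the docstring above
# mentions multiple_replace() from the cookbook recipe, which is not defined
# here; the class itself is used like this.
#
#   adict = {"Larry Wall": "Guido van Rossum", "Perl": "Python"}
#   xlat = Xlator(adict)
#   xlat.xlat("Larry Wall is the creator of Perl")
#   # -> 'Guido van Rossum is the creator of Python'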
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # Remember first letter
d = soundex_digits[ord(c)-ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
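# Illustrative sketch (not part of the original module): similar-sounding
# names collapse to the same code.
#
#   soundex('Robert')   # -> 'R163'
#   soundex('Rupert')   # -> 'R163'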
class Null:
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kwargs): return self
def __str__(self): return "Null()"
def __repr__(self): return "Null()"
def __nonzero__(self): return 0
def __getattr__(self, name): return self
def __setattr__(self, name, value): return self
def __delattr__(self, name): return self
def mkdirs(newdir, mode=0777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
try:
if not os.path.exists(newdir):
parts = os.path.split(newdir)
for i in range(1, len(parts)+1):
thispart = os.path.join(*parts[:i])
if not os.path.exists(thispart):
os.makedirs(thispart, mode)
except OSError, err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
class GetRealpathAndStat:
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try: del d[key]
except KeyError: pass
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self,size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur+1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:]+self.data[:self.cur]
def append(self,x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
            self.__class__ = self.__Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
    def __getitem__(self, i):
return self.data[i % len(self.data)]
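# Illustrative sketch (not part of the original module): once size_max
# elements have been appended, the buffer switches to the __Full class above
# and starts overwriting the oldest entry.
#
#   rb = RingBuffer(3)
#   for value in (1, 2, 3, 4):
#       rb.append(value)
#   rb.get()   # -> [2, 3, 4]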
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
    len(' '.join(seq[:ind])) <= N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, range(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen>=N: return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' '*len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind<Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
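# Illustrative sketch (not part of the original module): wrap a comment body
# at roughly 12 columns, prefixing the first line and padding the rest.
#
#   wrap('# ', 'one two three four five', 12)
#   # -> something like '# one two\n  three\n  four\n  five\n'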
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
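# Illustrative sketch (not part of the original module): the indentation of
# the first non-blank line (four spaces here) is stripped from every line,
# while the deeper relative indent of the second line is preserved.
#
#   dedent('\n    hello\n      world\n')   # -> 'hello\n  world'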
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path, fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
# Collect input and output arguments into one bunch
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list,
return_folders=return_folders, results=[])
def visit(arg, dirname, files):
# Append to arg.results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results.append(fullname)
break
# Block recursion if recursion was disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
def get_recursive_filelist(args):
"""
    Recurse through all the files and dirs in *args*, ignoring symbolic links,
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start+num]
if not len(item): break
yield item
start += num
def exception_to_str(s = None):
sh = StringIO.StringIO()
if s is not None: print >>sh, s
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq)<2: return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val: return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if not val: return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if val: return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [ (s, f) for i, f in enumerate(x) for s in x[i+1:] ]
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
"""
Should behave like python2.3 :meth:`dict.pop` method; *d* is a
:class:`dict`::
# returns value for key and deletes item; raises a KeyError if key
# is not in dict
val = popd(d, key)
# returns value for key if key exists, else default. Delete key,
# val item if it exists. Will not raise a KeyError
val = popd(d, key, default)
"""
warnings.warn("Use native python dict.pop method", DeprecationWarning)
# warning added 2008/07/22
if len(args)==1:
key = args[0]
val = d[key]
del d[key]
elif len(args)==2:
key, default = args
val = d.get(key, default)
try: del d[key]
except KeyError: pass
return val
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
    relevant methods to constrain size, just __setitem__, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if len(self)>=self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
dict.__setitem__(self, k, v)
self._killkeys.append(k)
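# Illustrative sketch (not part of the original module): with maxsize=2 the
# oldest insertion is evicted as soon as a third key is set.
#
#   cache = maxdict(2)
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['c'] = 3   # 'a' is dropped; remaining keys are 'b' and 'c'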
class Stack:
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
'return the current element, or None'
if not len(self._elements): return self._default
else: return self._elements[self._pos]
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos<N-1: self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos>0: self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos+1]
self._elements.append(o)
self._pos = len(self._elements)-1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements): return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements)==0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso==o: bubbles.append(thiso)
else: self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso==o: continue
else: self.push(thiso)
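# Illustrative sketch (not part of the original module): browser-style
# history navigation.
#
#   s = Stack()
#   s.push(1); s.push(2); s.push(3)
#   s.back()     # -> 2
#   s.push(10)   # discards the "forward" element 3; stack is now 1, 2, 10
#   s.home()     # pushes the first element again and returns 1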
def popall(seq):
'empty a list'
for i in xrange(len(seq)): seq.pop()
def finddir(o, match, case=False):
"""
    Return all attributes of *o* which match the string *match*. If *case*
    is True, require an exact case match.
"""
if case:
names = [(name,name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o) if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match)>=0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v,k) for k,v in d.items()])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
pid = os.getpid()
if sys.platform=='sunos5':
a2 = os.popen('ps -p %d -o osz' % pid).readlines()
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
mem = int(a2[1].split()[0])
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i+1, len(arg)))
return zip(*args)
def issubclass_safe(x, klass):
'return issubclass(x, klass) and return False on a TypeError'
try:
return issubclass(x, klass)
except TypeError:
return False
class MemoryMonitor:
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n/segments)
ii = range(0, n, dn)
ii[-1] = n-1
print
print 'memory report: i, mem, dmem, dmem/nloops'
print 0, self._mem[0]
for i in range(1, len(ii)):
di = ii[i] - ii[i-1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i-1]]
print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di))
if self._overflow:
print "Warning: array size was too small for the number of calls."
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from pylab import figure, show
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, { }, [])
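# Minimal sketch of how print_cycles is typically driven (not part of the
# original module; assumes the standard gc module and that gc.garbage has
# been populated by a collection):
#
#   import gc
#   gc.collect()
#   if gc.garbage:
#       print_cycles(gc.garbage, show_progress=True)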
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable.
For example:
    >>> g = Grouper()
>>> g.join('a', 'b')
>>> g.join('b', 'c')
>>> g.join('d', 'e')
>>> list(g)
[['a', 'b', 'c'], ['d', 'e']]
>>> g.joined('a', 'b')
True
>>> g.joined('a', 'c')
True
>>> g.joined('a', 'd')
False
"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
for key, val in mapping.items():
if key() is None:
del mapping[key]
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token: pass
token = Token()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in self._mapping.itervalues():
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in self._mapping.itervalues():
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
steps = np.floor(steps)
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1: ]
delta = ((a1 - a0) / steps)
for i in range(1, int(steps)):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
def recursive_remove(path):
if os.path.isdir(path):
for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: #Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
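# Illustrative sketch (not part of the original module): a masked entry in x
# and a NaN in the same array both remove the corresponding rows from every
# argument of matching length.
#
#   x = np.ma.array([1., 2., 3., np.nan], mask=[False, False, True, False])
#   y = np.array([10., 20., 30., 40.])
#   xc, yc = delete_masked_points(x, y)
#   # xc -> array([ 1.,  2.]),  yc -> array([ 10.,  20.])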
def unmasked_index_ranges(mask, compressed = True):
'''
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
'''
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.isvector(X)
def vector_lengths( X, P=2., axis=None ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.vector_lengths(X, P=P, axis=axis)
def distances_along_curve( X ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.distances_along_curve( X )
def path_length(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.path_length(X)
def is_closed_polygon(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
assert( allequal([1,1,1]) )
assert(not allequal([1,1,0]) )
assert( allequal([]) )
assert( allequal(('a', 'a')))
assert( not allequal(('a', 'b')))
| agpl-3.0 |
Fireblend/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification accuracy when there are many
features relative to the number of training samples.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
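# A quick, hypothetical sanity check of the helper above (not part of the
# original example): only the first column carries class information, the
# remaining columns are pure noise, and the labels come from two blobs.
#
#   X_demo, y_demo = generate_data(n_samples=5, n_features=3)
#   X_demo.shape   # -> (5, 3)
#   set(y_demo)    # -> subset of {0, 1}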
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/frame/test_constructors.py | 7 | 73312 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import nose
from numpy.random import randn
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from pandas.types.common import is_integer_dtype
from pandas.compat import (lmap, long, zip, range, lrange, lzip,
OrderedDict, is_platform_little_endian)
from pandas import compat
from pandas import (DataFrame, Index, Series, isnull,
MultiIndex, Timedelta, Timestamp,
date_range)
from pandas.core.common import PandasError
import pandas as pd
import pandas.core.common as com
import pandas.lib as lib
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_constructor(self):
df = DataFrame()
self.assertEqual(len(df.index), 0)
df = DataFrame(data={})
self.assertEqual(len(df.index), 0)
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
self.assertEqual(foo['a'].dtype, object)
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
self.assertRaises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
self.assertEqual(orig_df['col1'][0], 1.)
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
self.assertEqual(df.values[0, 0], 99)
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
self.assertEqual(df.values[0, 0], 97)
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
self.assertIsNone(df.ix[1, 0])
self.assertEqual(df.ix[0, 1], '2')
def test_constructor_list_frames(self):
# GH 3243
result = DataFrame([DataFrame([])])
self.assertEqual(result.shape, (1, 0))
result = DataFrame([DataFrame(dict(A=lrange(5)))])
tm.assertIsInstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update(dict([(d, a) for d, a in zipper]))
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer dtypes coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
self.assertEqual(a.dtype, df.a.dtype)
self.assertEqual(b.dtype, df.b.dtype)
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
# Assigning causes segfault in NumPy < 1.5.1
# rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
self.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
self.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
self.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
self.assertEqual(df.values.dtype, np.bool_)
def test_constructor_overflow_int64(self):
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
self.assertEqual(result['a'].dtype, object)
# #2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
self.assertEqual(df_crawls['uid'].dtype, object)
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
self.assertEqual(expected, list(df.columns))
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
self.assertEqual(len(self.ts1), 30)
self.assertEqual(len(self.ts2), 25)
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
self.assertEqual(len(frame), len(self.ts2))
self.assertNotIn('col1', frame)
self.assertTrue(isnull(frame['col3']).all())
# Corner cases
self.assertEqual(len(DataFrame({})), 0)
# mix dict and array, wrong size - no spec for which error should raise
# first
with tm.assertRaises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
self.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
self.assertIs(frame.index, idx)
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
self.assertIs(frame.index, idx)
self.assertIs(frame.columns, idx)
self.assertEqual(len(frame._series), 3)
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
tm.assert_equal(frame_none.get_value(0, 'a'), None)
tm.assert_equal(frame_none_list.get_value(0, 'a'), None)
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
with tm.assertRaises(ValueError):
DataFrame({'a': 0.7})
with tm.assertRaises(ValueError):
DataFrame({'a': 0.7}, columns=['a'])
with tm.assertRaises(ValueError):
DataFrame({'a': 0.7}, columns=['b'])
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with tm.assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
with tm.assertRaisesRegexp(ValueError, "Shape of passed values is "
r"\(3, 2\), indices imply \(3, 1\)"):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
with tm.assertRaisesRegexp(ValueError, "Shape of passed values is "
r"\(3, 2\), indices imply \(2, 2\)"):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
with tm.assertRaisesRegexp(ValueError, 'If using all scalar values, '
'you must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame(dict((col, dict(compat.iteritems(val)))
for col, val in compat.iteritems(data)))
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.float64)
self.assertEqual(frame['A'].dtype, np.float64)
frame = DataFrame(test_data)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.object_)
self.assertEqual(frame['A'].dtype, np.float64)
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 20)
self.assertEqual(frame['A'].dtype, np.object_)
self.assertEqual(frame['B'].dtype, np.float64)
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
tm.assertIsInstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
tm.assertIsInstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame(dict((k, list(v))
for k, v in compat.iteritems(data)))
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_frame_equal(
result, expected, check_dtype=True, check_index_type=True,
check_column_type=True, check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return dict((i, {constructor(s): 2 * i})
for i, s in enumerate(dates_as_str))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return dict((i, {constructor(s): 2 * i})
for i, s in enumerate(td_as_int))
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
self.assertEqual(df['a'].dtype, 'object')
self.assertEqual(df['b'].dtype, 'object')
# list of periods
df = pd.DataFrame({'a': a.asobject.tolist(),
'b': b.asobject.tolist()})
self.assertEqual(df['a'].dtype, 'object')
self.assertEqual(df['b'].dtype, 'object')
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix of shape (2, 3) used as input; *empty* creates
        # appropriately sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
self.assertEqual(len(frame.index), 3)
self.assertEqual(len(frame.columns), 1)
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with tm.assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
self.assert_index_equal(frame.index, pd.Index(lrange(2)))
self.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
self.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
self.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
self.assertEqual(len(frame.index), 0)
frame = DataFrame(empty((3, 0)))
self.assertEqual(len(frame.columns), 0)
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1.0, frame['A'][1])
self.assertEqual(2.0, frame['C'][2])
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertTrue(np.all(~np.asarray(frame == frame)))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
self.assertEqual(frame.values.dtype, np.float64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'][1])
self.assertEqual(2, frame['C'][2])
# masked np.datetime64 stays (use lib.NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(isnull(frame).values.all())
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'].view('i8')[1])
self.assertEqual(2, frame['C'].view('i8')[2])
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
self.assertEqual(frame.values.dtype, object)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(True, frame['A'][1])
self.assertEqual(False, frame['C'][2])
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = dict([(k, v.filled()) if hasattr(
v, 'filled') else (k, v) for k, v in comb])
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner(self):
df = DataFrame(index=[])
self.assertEqual(df.values.shape, (0, 0))
# empty but with specified dtype
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)
self.assertEqual(df.values.dtype, np.object_)
# does not error but ends up float
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
self.assertEqual(df.values.dtype, np.object_)
# #1783 empty dtype object
df = DataFrame({}, columns=['foo', 'bar'])
self.assertEqual(df.values.dtype, np.object_)
df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
dtype=int)
self.assertEqual(df.values.dtype, np.object_)
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
self.assertEqual(df['int'].dtype, np.int64)
self.assertEqual(df['bool'].dtype, np.bool_)
self.assertEqual(df['float'].dtype, np.float64)
self.assertEqual(df['complex'].dtype, np.complex128)
self.assertEqual(df['object'].dtype, np.object_)
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with tm.assertRaisesRegexp(ValueError, 'must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
self.assertEqual(df_casted.values.dtype, np.int64)
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
self.assertEqual(dm.values.ndim, 2)
arr = randn(0)
dm = DataFrame(arr)
self.assertEqual(dm.values.ndim, 2)
self.assertEqual(dm.values.ndim, 2)
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 2))
dm = DataFrame(columns=['A', 'B'])
self.assertEqual(dm.values.shape, (0, 2))
dm = DataFrame(index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 0))
# corner, silly
# TODO: Fix this Exception to be better...
with tm.assertRaisesRegexp(PandasError, 'constructor not '
'properly called'):
DataFrame((1, 2, 3))
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with tm.assertRaisesRegexp(ValueError, 'cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.float64)
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
l = [[1, 'a'], [2, 'b']]
df = DataFrame(data=l, columns=["num", "str"])
self.assertTrue(is_integer_dtype(df['num']))
self.assertEqual(df['str'].dtype, np.object_)
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: range(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
import collections
class DummyContainer(collections.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(l, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame.from_items([('A', array.array('i', range(10)))])
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with tm.assertRaisesRegexp(ValueError,
'arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
self.assertTrue(result.index.is_monotonic)
# ordering ambiguous, raise exception
with tm.assertRaisesRegexp(ValueError, 'ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
self.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
self.assertRaises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
self.assertEqual(df1.columns[0], 'x')
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
self.assertEqual(df2.columns[0], 0)
self.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
self.assert_index_equal(result.index, Index(index))
self.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
self.assertEqual(recons['A'].dtype, np.float64)
with tm.assertRaisesRegexp(TypeError,
"Must pass columns with orient='index'"):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = lib.list_to_object_array(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
tm.assertIsInstance(recons['foo'][0], tuple)
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])
with tm.assertRaisesRegexp(ValueError, 'does not match index length'):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_iterator_failure(self):
with tm.assertRaisesRegexp(TypeError, 'iterator'):
df = DataFrame(iter([1, 2, 3])) # noqa
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_items(
[('a', [8]), ('a', [5])], columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
self.assertRaises(ValueError, DataFrame.from_items,
[('a', [8]), ('a', [5]), ('b', [6])],
columns=['b', 'a', 'a'])
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])
self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])
with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result.sort_index()
expected = Series(expected)
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
self.assertEqual(datetime_s.dtype, 'M8[ns]')
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
self.assertEqual(df.iat[0, 0], dt)
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
self.assertEqual(df.iat[0, 0], dt)
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
self.assertTrue(df.iat[0, 0].tz is None)
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
self.assertTrue(str(df.iat[0, 0].tz) == 'UTC')
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
self.assertTrue(str(df.iat[0, 0].tz) == 'US/Eastern')
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
self.assertTrue((cop['A'] == 5).all())
self.assertFalse((self.frame['A'] == 5).all())
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
self.assertTrue((df.values[5] == 5).all())
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
self.assertFalse((df.values[6] == 6).all())
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
self.assertFalse((series['A'] == 5).all())
def test_constructor_with_nas(self):
# GH 5016
# NaNs in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
# allow single nans to succeed
indexer = np.arange(len(df.columns))[isnull(df.columns)]
if len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should fail
else:
def f():
df.loc[:, np.nan]
self.assertRaises(TypeError, f)
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
self.assertEqual(d['a'].dtype, np.object_)
self.assertFalse(d['a'][1])
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
self.assert_index_equal(indexed_frame.index, index)
# without names, it should fall back to the last-ditch default
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with tm.assertRaisesRegexp(ValueError, msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
self.assertEqual(len(records.dtype.names), 3)
records = indexed_frame.to_records(index=False)
self.assertEqual(len(records.dtype.names), 2)
self.assertNotIn('index', records.dtype.names)
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
self.assertTrue(np.isnan(df['c'][0]))
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
self.assertEqual(columns, original_columns)
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
self.assertEqual(df['a'].dtype, object)
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
self.assertEqual(df['a'].dtype, np.float64)
self.assertTrue(np.isnan(df['a'].values[-1]))
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
self.assertEqual(result.index.name, 'order_id')
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
self.assertEqual(result.index.names, ('order_id', 'quantity'))
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
self.assertEqual(df.index.name, 'id')
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
self.assertEqual(df.index.name, 'id')
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
raise nose.SkipTest("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
raise nose.SkipTest("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# it is actually tricky to create the record-like arrays and
# keep the dtypes intact
blocks = df.blocks
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
# recarray built directly and recarray from to_records (both have dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
# list of tuples (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples are in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 0)
self.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result = DataFrame.from_records([])
self.assertEqual(len(result), 0)
self.assertEqual(len(result.columns), 0)
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# columns is in a different order here than the actual items iterated
# from the dict
columns = []
for dtype, b in compat.iteritems(df.blocks):
columns.extend(b.columns)
asdict = dict((x, y) for x, y in compat.iteritems(df))
asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(
asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns)
.reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns)
.reindex(columns=df.columns))
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index='C')
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])
self.assertRaises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
self.assertTrue(np.array_equal(result.columns, ['bar']))
self.assertEqual(len(result), 0)
self.assertEqual(result.index.name, 'foo')
class TestDataFrameConstructorWithDatetimeTZ(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
dr = date_range('20130110', periods=3)
# construction
df = DataFrame({'A': idx, 'B': dr})
self.assertEqual(str(df['A'].dtype), 'datetime64[ns, US/Eastern]')
self.assertTrue(df['A'].name == 'A')
tm.assert_series_equal(df['A'], Series(idx, name='A'))
tm.assert_series_equal(df['B'], Series(dr, name='B'))
def test_from_index(self):
# from index
idx2 = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
idx2 = date_range('20130101', periods=3, tz='US/Eastern')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2[0], Series(idx2, name=0))
if __name__ == '__main__':
import nose # noqa
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/frame/common.py | 13 | 4708 | import numpy as np
from pandas import compat
from pandas.util._decorators import cache_readonly
import pandas.util.testing as tm
import pandas as pd
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = pd.DataFrame(_seriesd)
_frame2 = pd.DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = pd.DataFrame(dict((k, v.astype(int))
for k, v in compat.iteritems(_seriesd)))
_tsframe = pd.DataFrame(_tsd)
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class TestData(object):
@cache_readonly
def frame(self):
return _frame.copy()
@cache_readonly
def frame2(self):
return _frame2.copy()
@cache_readonly
def intframe(self):
# force these all to int64 to avoid platform testing issues
return pd.DataFrame(dict([(c, s) for c, s in
compat.iteritems(_intframe)]),
dtype=np.int64)
@cache_readonly
def tsframe(self):
return _tsframe.copy()
@cache_readonly
def mixed_frame(self):
return _mixed_frame.copy()
@cache_readonly
def mixed_float(self):
return pd.DataFrame({'A': _frame['A'].copy().astype('float32'),
'B': _frame['B'].copy().astype('float32'),
'C': _frame['C'].copy().astype('float16'),
'D': _frame['D'].copy().astype('float64')})
@cache_readonly
def mixed_float2(self):
return pd.DataFrame({'A': _frame2['A'].copy().astype('float32'),
'B': _frame2['B'].copy().astype('float32'),
'C': _frame2['C'].copy().astype('float16'),
'D': _frame2['D'].copy().astype('float64')})
@cache_readonly
def mixed_int(self):
return pd.DataFrame({'A': _intframe['A'].copy().astype('int32'),
'B': np.ones(len(_intframe['B']), dtype='uint64'),
'C': _intframe['C'].copy().astype('uint8'),
'D': _intframe['D'].copy().astype('int64')})
@cache_readonly
def all_mixed(self):
return pd.DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'float32': np.array([1.] * 10, dtype='float32'),
'int32': np.array([1] * 10, dtype='int32')},
index=np.arange(10))
@cache_readonly
def tzframe(self):
result = pd.DataFrame({'A': pd.date_range('20130101', periods=3),
'B': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'C': pd.date_range('20130101', periods=3,
tz='CET')})
result.iloc[1, 1] = pd.NaT
result.iloc[1, 2] = pd.NaT
return result
@cache_readonly
def empty(self):
return pd.DataFrame({})
@cache_readonly
def ts1(self):
return tm.makeTimeSeries(nper=30)
@cache_readonly
def ts2(self):
return tm.makeTimeSeries(nper=30)[5:]
@cache_readonly
def simple(self):
arr = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
return pd.DataFrame(arr, columns=['one', 'two', 'three'],
index=['a', 'b', 'c'])
# self.ts3 = tm.makeTimeSeries()[-5:]
# self.ts4 = tm.makeTimeSeries()[1:-1]
def _check_mixed_float(df, dtype=None):
# float16 is most likely to be upcast to float32
dtypes = dict(A='float32', B='float32', C='float16', D='float64')
if isinstance(dtype, compat.string_types):
dtypes = dict([(k, dtype) for k, v in dtypes.items()])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
def _check_mixed_int(df, dtype=None):
dtypes = dict(A='int32', B='uint64', C='uint8', D='int64')
if isinstance(dtype, compat.string_types):
dtypes = dict([(k, dtype) for k, v in dtypes.items()])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
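# Added note (not in the original module): in both helpers above, `dtype` may be
# a single dtype string applied to every column or a dict overriding selected
# columns, e.g. _check_mixed_float(df, dtype={'C': 'float32'}) expects A/B as
# float32, C as float32 (overridden) and D as float64.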
| apache-2.0 |
ldirer/scikit-learn | examples/feature_selection/plot_feature_selection.py | 5 | 2920 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
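# Added note: X now has 24 columns -- the 4 informative iris features followed
# by 20 uniform-noise features.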
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange',
edgecolor='black')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy', edgecolor='black')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c',
edgecolor='black')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
jpeterbaker/maxfield | lib/PlanPrinter.py | 1 | 19907 | '''
This file is part of Maxfield.
Maxfield is a planning tool for helping Ingress players to determine
an efficient plan to create many in-game fields.
Copyright (C) 2015 by Jonathan Baker: [email protected]
Maxfield is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Maxfield is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Maxfield. If not, see <http://www.gnu.org/licenses/>.
'''
# Sorry that this whole file is so messy. Input/output things are tough to make tidy.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import geometry
np = geometry.np
import agentOrder
import networkx as nx
import electricSpring
import time
# returns the points of `a` shrunk 10% toward their centroid
def shrink(a):
centroid = a.mean(1).reshape([2,1])
return centroid + .9*(a-centroid)
def commaGroup(n):
# Returns str(n) with commas inserted as thousands separators
s = str(n)
return ','.join([ s[max(i,0):i+3] for i in range(len(s)-3,-3,-3)][::-1])
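# Illustrative examples added for clarity (not in the original source):
#   commaGroup(313)     -> '313'
#   commaGroup(1234567) -> '1,234,567'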
class PlanPrinter:
def __init__(self,a,outputDir,nagents,color='#FF004D'):
self.a = a
self.n = a.order() # number of nodes
self.m = a.size() # number of links
self.nagents = nagents
self.outputDir = outputDir
self.color = color
# if the ith link to be made is (p,q) then orderedEdges[i] = (p,q)
self.orderedEdges = [None]*self.m
for e in a.edges_iter():
self.orderedEdges[a.edge[e[0]][e[1]]['order']] = e
# movements[i][j] is the index (in orderedEdges) of agent i's jth link
self.movements = agentOrder.getAgentOrder(a,nagents,self.orderedEdges)
# link2agent[i] is the agent that will make the ith link
self.link2agent = [-1]*self.m
for i in range(nagents):
for e in self.movements[i]:
self.link2agent[e] = i
# keyneeds[i,j] = number of keys agent i needs for portal j
self.agentkeyneeds = np.zeros([self.nagents,self.n],dtype=int)
for i in xrange(self.nagents):
for e in self.movements[i]:
p,q = self.orderedEdges[e]
self.agentkeyneeds[i][q] += 1
self.names = np.array([a.node[i]['name'] for i in xrange(self.n)])
# The alphabetical order
makeLowerCase = np.vectorize(lambda s: s.lower())
self.nameOrder = np.argsort(makeLowerCase(self.names))
self.xy = np.array([self.a.node[i]['xy'] for i in xrange(self.n)])
# The order from north to south (for easy-to-find labels)
self.posOrder = np.argsort(self.xy,axis=0)[::-1,1]
# The inverse permutation of posOrder
self.nslabel = [-1]*self.n
for i in xrange(self.n):
self.nslabel[self.posOrder[i]] = i
self.maxNameLen = max([len(a.node[i]['name']) for i in xrange(self.n)])
def keyPrep(self):
rowFormat = '{0:11d} | {1:6d} | {2}\n'
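# Illustrative row produced by rowFormat for a hypothetical portal (added comment):
#   "          8 |      2 | Example Portal Name"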
with open(self.outputDir+'keyPrep.txt','w') as fout:
fout.write( 'Keys Needed | Lacked | %s\n'\
%time.strftime('%Y-%m-%d %H:%M:%S %Z'))
for i in self.nameOrder:
keylack = max(self.a.in_degree(i)-self.a.node[i]['keys'],0)
fout.write(rowFormat.format(\
self.a.in_degree(i),\
keylack,\
self.names[i]\
))
unused = set(xrange(self.n))
infirst = []
outfirst = []
for p,q in self.orderedEdges:
if p in unused:
outfirst.append(self.names[p])
unused.remove(p)
if q in unused:
infirst.append(self.names[q])
unused.remove(q)
infirst.sort()
outfirst.sort()
with open(self.outputDir+'ownershipPrep.txt','w') as fout:
fout.write("These portals' first links are incoming %s\n"\
%time.strftime('%Y-%m-%d %H:%M:%S %Z'))
fout.write('They should be at full resonators before linking\n\n')
for s in infirst:
fout.write(' %s\n'%s)
fout.write("\nThese portals' first links are outgoing\n\n")
fout.write('Their resonators can be applied when first agent arrives\n')
for s in outfirst:
fout.write(' %s\n'%s)
def agentKeys(self):
rowFormat = '%4s %4s %s\n'
for agent in range(self.nagents):
with open(self.outputDir+'keys_for_agent_%s_of_%s.txt'\
%(agent+1,self.nagents),'w') as fout:
fout.write('Keys for Agent %s of %s %s\n\n'\
%(agent+1,self.nagents, time.strftime('%Y-%m-%d %H:%M:%S %Z')))
fout.write('Map# Keys Name\n')
for portal in self.nameOrder:
keys = self.agentkeyneeds[agent,portal]
if self.agentkeyneeds[agent,portal] == 0:
keys = ''
fout.write(rowFormat%(\
self.nslabel[portal],\
keys,\
self.names[portal]\
))
def drawBlankMap(self):
plt.plot(self.xy[:,0],self.xy[:,1],'o',ms=16,color=self.color)
for i in xrange(self.n):
plt.text(self.xy[i,0],self.xy[i,1],self.nslabel[i],\
fontweight='bold',ha='center',va='center',fontsize=10)
def drawSubgraph(self,edges=None):
'''
Draw a subgraph of a
Only includes the edges in 'edges'
Default is all edges
'''
if edges is None:
edges = range(self.m)
# anchors = np.array([ self.xy[self.orderedEdges[e],:] for e in edges]).mean(1)
# edgeLabelPos = electricSpring.edgeLabelPos(self.xy,anchors)
#
# self.drawBlankMap()
#
# for i in xrange(len(edges)):
# j = edges[i]
# p,q = self.orderedEdges[j]
# plt.plot([ self.xy[p,0],edgeLabelPos[i,0],self.xy[q,0] ] ,\
# [ self.xy[p,1],edgeLabelPos[i,1],self.xy[q,1] ],'r-')
#
# plt.text(edgeLabelPos[i,0],edgeLabelPos[i,1],j,\
# ha='center',va='center')
### The code below works. It just uses networkx draw functions
if edges is None:
b = self.a
else:
b = nx.DiGraph()
b.add_nodes_from(xrange(self.n))
b = nx.DiGraph()
b.add_nodes_from(xrange(self.n))
for e in edges:
p,q = self.orderedEdges[e]
b.add_edge(p,q,{'order':e})
edgelabels = dict([ (e,self.a.edge[e[0]][e[1]]['order'])\
for e in b.edges_iter() ])
plt.plot(self.xy[:,0],self.xy[:,1],'o',ms=16,color=self.color)
for j in xrange(self.n):
i = self.posOrder[j]
plt.text(self.xy[i,0],self.xy[i,1],j,\
fontweight='bold',ha='center',va='center')
try:
nx.draw_networkx_edge_labels(b,self.ptmap,edgelabels)
except AttributeError:
self.ptmap = dict([(i,self.a.node[i]['xy']) for i in xrange(self.n) ])
nx.draw_networkx_edge_labels(b,self.ptmap,edgelabels)
# edge_color does not seem to support arbitrary colors easily
if self.color == '#3BF256':
nx.draw_networkx_edges(b,self.ptmap,edge_color='g')
elif self.color == '#2ABBFF':
nx.draw_networkx_edges(b,self.ptmap,edge_color='b')
else:
nx.draw_networkx_edges(b,self.ptmap,edge_color='k')
plt.axis('off')
def planMap(self):
xmin = self.xy[:,0].min()
xmax = self.xy[:,0].max()
ymin = self.xy[:,1].min()
ymax = self.xy[:,1].max()
xylims = np.array([xmin,xmax,ymin,ymax])
xylims *= 1.1
# Plot labels aligned to avoid other portals
for j in xrange(self.n):
i = self.posOrder[j]
plt.plot(self.xy[i,0],self.xy[i,1],'o',color=self.color)
displaces = self.xy[i] - self.xy
displaces[i,:] = np.inf
nearest = np.argmin(np.abs(displaces).sum(1))
if self.xy[nearest,0] < self.xy[i,0]:
ha = 'left'
else:
ha = 'right'
if self.xy[nearest,1] < self.xy[i,1]:
va = 'bottom'
else:
va = 'top'
plt.text(self.xy[i,0],self.xy[i,1],str(j),ha=ha,va=va)
fig = plt.gcf()
fig.set_size_inches(6.5,9)
plt.axis(xylims)
plt.axis('off')
plt.title('Portals numbered north to south\nNames on key list')
plt.savefig(self.outputDir+'portalMap.png')
# plt.show()
plt.clf()
# Draw the map with all edges in place and labeled
self.drawSubgraph()
# self.drawBlankMap()
plt.axis(xylims)
plt.axis('off')
plt.title('Portal and Link Map')
plt.savefig(self.outputDir+'linkMap.png')
plt.clf()
# for agent in range(self.nagents):
# self.drawSubgraph(self.movements[agent])
# plt.axis(xylims)
# plt.savefig(self.outputDir+'linkMap_agent_%s_of_%s.png'%(agent+1,self.nagents))
# plt.clf()
def agentLinks(self):
# Total distance traveled by each agent
agentdists = np.zeros(self.nagents)
# Total number of links, fields for each agent
agentlinkcount = [0]*self.nagents
agentfieldcount = [0]*self.nagents
totalAP = 0
totalDist = 0
for i in range(self.nagents):
movie = self.movements[i]
# first portal in first link
curpos = self.a.node[self.orderedEdges[movie[0]][0]]['geo']
agentlinkcount[i] = len(movie)
for e in movie[1:]:
p,q = self.orderedEdges[e]
newpos = self.a.node[p]['geo']
dist = geometry.sphereDist(curpos,newpos)
# print 'Agent %s walks %s to %s'%(i,dist,self.nslabel[p])
agentdists[i] += dist
curpos = newpos
agentfieldcount[i] += len(self.a.edge[p][q]['fields'])
totalAP += 313
totalAP += 1250 * len(self.a.edge[p][q]['fields'])
totalDist += dist
# Different formatting for the agent's own links
# plainStr = '{0:4d}{1:1s} {2: 5d}{3:5d} {4:s}\n {5:4d} {6:s}\n\n'
plainStr = '{0:4d}{1:1s} {2: 5d}{3:5d} {4:s} -> {5:d} {6:s}\n'
hilitStr = '{0:4d}{1:1s} {2:_>5d}{3:5d} {4:s}\n {5:4d} {6:s}\n\n'
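# Illustrative plainStr row with hypothetical values (added comment):
#   "  12*     3   17 Origin Portal -> 5 Destination Portal"
# hilitStr highlights the agent's own links by underscore-padding the agent
# number and printing the destination on its own line.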
totalTime = self.a.walktime+self.a.linktime+self.a.commtime
for agent in range(self.nagents):
with open(self.outputDir+'links_for_agent_%s_of_%s.txt'\
%(agent+1,self.nagents),'w') as fout:
fout.write('Complete link schedule issued to agent %s of %s %s\n\n'\
%(agent+1,self.nagents,time.strftime('%Y-%m-%d %H:%M:%S %Z')))
fout.write('\nLinks marked with * can be made EARLY\n')
fout.write('----------- PLAN DATA ------------\n')
fout.write('Minutes: %s minutes\n'%int(totalTime/60+.5))
fout.write('Total Distance: %s meter\n'%int(totalDist))
fout.write('Total AP: %s\n'%totalAP)
fout.write('AP per Agent per minute: %0.2f AP/Agent/min\n'%float(totalAP/self.nagents/(totalTime/60+.5)))
fout.write('AP per Agent per meter: %0.2f AP/Agent/m\n'%float(totalAP/self.nagents/totalDist))
agentAP = 313*agentlinkcount[agent] + 1250*agentfieldcount[agent]
fout.write('----------- AGENT DATA -----------\n')
fout.write('Distance traveled: %s m (%s %%)\n'%(int(agentdists[agent]),int(100*agentdists[agent]/totalDist)))
fout.write('Links made: %s\n'%(agentlinkcount[agent]))
fout.write('Fields completed: %s\n'%(agentfieldcount[agent]))
fout.write('Total experience: %s AP (%s %%)\n'%(agentAP,int(100*agentAP/totalAP)))
fout.write('----------------------------------\n')
fout.write('Link Agent Map# Link Origin\n')
fout.write(' Link Destination\n')
fout.write('----------------------------------\n')
# 1234112345612345 name
last_link_from_other_agent = 0
for i in xrange(self.m):
p,q = self.orderedEdges[i]
linkagent = self.link2agent[i]
# Put a star by links that can be completed early since they complete no fields
numfields = len(self.a.edge[p][q]['fields'])
if numfields == 0:
star = '*'
# print '%s %s completes nothing'%(p,q)
else:
star = ''
# print '%s %s completes'%(p,q)
# for t in self.a.edge[p][q]['fields']:
# print ' ',t
if linkagent != agent:
fout.write(plainStr.format(\
i,\
star,\
linkagent+1,\
self.nslabel[p],\
self.names[p],\
self.nslabel[q],\
self.names[q]\
))
last_link_from_other_agent = 1
else:
if last_link_from_other_agent:
fout.write('\n')
last_link_from_other_agent = 0
fout.write(hilitStr.format(\
i,\
star,\
linkagent+1,\
self.nslabel[p],\
self.names[p],\
self.nslabel[q],\
self.names[q]\
))
def animate(self):
# show or save a sequence of images demonstrating how the plan would unfold
from matplotlib.patches import Polygon
fig = plt.figure()
ax = fig.add_subplot(111)
GREEN = ( 0.0 , 1.0 , 0.0 , 0.3)
BLUE = ( 0.0 , 0.0 , 1.0 , 0.3)
RED = ( 1.0 , 0.0 , 0.0 , 0.5)
INVISIBLE = ( 0.0 , 0.0 , 0.0 , 0.0 )
portals = np.array([self.a.node[i]['xy'] for i in self.a.nodes_iter()]).T
# Plot all edges lightly
def dashAllEdges():
for p,q in self.a.edges_iter():
plt.plot(portals[0,[p,q]],portals[1,[p,q]],'k:')
aptotal = 0
edges = []
patches = []
plt.plot(portals[0],portals[1],'go')
# plt.plot(portals[0],portals[1],'bo')
dashAllEdges()
plt.title('AP:\n%s'%commaGroup(aptotal),ha='center')
plt.axis('off')
plt.savefig(self.outputDir+'frame_-1.png'.format(i))
plt.clf()
for i in xrange(self.m):
p,q = self.orderedEdges[i]
# print p,q,self.a.edge[p][q]['fields']
plt.plot(portals[0],portals[1],'go')
# plt.plot(portals[0],portals[1],'bo')
# Plot all edges lightly
dashAllEdges()
for edge in edges:
plt.plot(edge[0],edge[1],'g-')
# plt.plot(edge[0],edge[1],'b-')
# We'll display the new fields in red
newPatches = []
for tri in self.a.edge[p][q]['fields']:
# print 'edge has a field'
coords = np.array([ self.a.node[v]['xy'] for v in tri ])
newPatches.append(Polygon(shrink(coords.T).T,facecolor=RED,\
edgecolor=INVISIBLE))
# newPatches.append(Polygon(shrink(coords.T).T,facecolor=GREEN,\
# edgecolor=INVISIBLE))
# print '%s new patches'%len(newPatches)
aptotal += 313+1250*len(newPatches)
newEdge = np.array([self.a.node[p]['xy'],self.a.node[q]['xy']]).T
patches += newPatches
edges.append(newEdge)
# plt.arrow( x, y, dx, dy, **kwargs )
# plt.arrow( newEdge[0,0],\
# newEdge[1,0],\
# newEdge[0,1]-newEdge[0,0],\
# newEdge[1,1]-newEdge[1,0],\
# fc="k", ec="k")#,head_width=0.0005,head_length=0.001 )
plt.plot(newEdge[0],newEdge[1],'k-',lw=2)
# plt.plot(newEdge[0],newEdge[1],'g-')
ax = plt.gca()
# print 'adding %s patches'%len(patches)
for patch in patches:
ax.add_patch(patch)
ax.set_title('AP:\n%s'%commaGroup(aptotal),ha='center')
ax.axis('off')
plt.savefig(self.outputDir+'frame_{0:02d}.png'.format(i))
ax.cla()
for patch in newPatches:
patch.set_facecolor(GREEN)
# patch.set_facecolor(BLUE)
plt.plot(portals[0],portals[1],'go')
# plt.plot(portals[0],portals[1],'bo')
for edge in edges:
plt.plot(edge[0],edge[1],'g-')
# plt.plot(edge[0],edge[1],'b-')
for patch in patches:
ax.add_patch(patch)
ax.set_title('AP:\n%s'%commaGroup(aptotal),ha='center')
ax.axis('off')
plt.savefig(self.outputDir+'frame_%s.png'%self.m)
ax.cla()
def split3instruct(self):
portals = np.array([self.a.node[i]['xy'] for i in self.a.nodes_iter()]).T
gen1 = self.a.triangulation
oldedges = []
plt.plot(portals[0],portals[1],'go')
plt.axis('off')
plt.savefig(self.outputDir+'depth_-1.png')
plt.clf()
depth = 0
while True:
# newedges[i][0] has the x-coordinates of both verts of edge i
newedges = [ np.array([
self.a.node[p]['xy'] ,\
self.a.node[q]['xy']
]).T\
for j in range(len(gen1)) \
for p,q in gen1[j].edgesByDepth(depth)\
]
if len(newedges) == 0:
break
plt.plot(portals[0],portals[1],'go')
for edge in oldedges:
plt.plot(edge[0],edge[1],'k-')
for edge in newedges:
plt.plot(edge[0],edge[1],'r-')
oldedges += newedges
plt.axis('off')
plt.savefig(self.outputDir+'depth_{0:02d}.png'.format(depth))
plt.clf()
depth += 1
plt.plot(portals[0],portals[1],'go')
for edge in oldedges:
plt.plot(edge[0],edge[1],'k-')
plt.axis('off')
plt.savefig(self.outputDir+'depth_%s.png'%depth)
plt.clf()
| gpl-3.0 |
imaculate/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third features is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
qifeigit/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
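# Added summary (not in the original source): with include_self=None, the current
# fallback is include_self=True for mode='connectivity' (each sample is its own
# first neighbor) and include_self=False for mode='distance'.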
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self : bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
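# Illustrative sketch (not part of the original scikit-learn module): a
# hypothetical helper showing radius_neighbors_graph in 'distance' mode, where
# non-zero entries hold Euclidean distances rather than ones.
def _example_radius_neighbors_graph_distance_mode():
    X = [[0], [3], [1]]
    A = radius_neighbors_graph(X, 1.5, mode='distance',
                               include_self=False).toarray()
    # Points with values 0 and 1 lie within the radius of each other.
    assert A[0, 2] == 1.0 and A[2, 0] == 1.0
    # Points with values 0 and 3 are farther apart than the radius: no edge.
    assert A[0, 1] == 0.0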
| bsd-3-clause |
fmfn/UnbalancedDataset | imblearn/utils/tests/test_show_versions.py | 3 | 1820 | """Test for the show_versions helper. Based on the sklearn tests."""
# Author: Alexander L. Hayes <[email protected]>
# License: MIT
from imblearn.utils._show_versions import _get_deps_info
from imblearn.utils._show_versions import show_versions
def test_get_deps_info():
_deps_info = _get_deps_info()
assert "pip" in _deps_info
assert "setuptools" in _deps_info
assert "imblearn" in _deps_info
assert "sklearn" in _deps_info
assert "numpy" in _deps_info
assert "scipy" in _deps_info
assert "Cython" in _deps_info
assert "pandas" in _deps_info
assert "joblib" in _deps_info
def test_show_versions_default(capsys):
show_versions()
out, err = capsys.readouterr()
assert "python" in out
assert "executable" in out
assert "machine" in out
assert "pip" in out
assert "setuptools" in out
assert "imblearn" in out
assert "sklearn" in out
assert "numpy" in out
assert "scipy" in out
assert "Cython" in out
assert "pandas" in out
assert "keras" in out
assert "tensorflow" in out
assert "joblib" in out
def test_show_versions_github(capsys):
show_versions(github=True)
out, err = capsys.readouterr()
assert "<details><summary>System, Dependency Information</summary>" in out
assert "**System Information**" in out
assert "* python" in out
assert "* executable" in out
assert "* machine" in out
assert "**Python Dependencies**" in out
assert "* pip" in out
assert "* setuptools" in out
assert "* imblearn" in out
assert "* sklearn" in out
assert "* numpy" in out
assert "* scipy" in out
assert "* Cython" in out
assert "* pandas" in out
assert "* keras" in out
assert "* tensorflow" in out
assert "* joblib" in out
assert "</details>" in out
| mit |
spallavolu/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
TomAugspurger/pandas | pandas/io/common.py | 1 | 17139 | """Common IO api utilities"""
import bz2
from collections import abc
import gzip
from io import BufferedIOBase, BytesIO, RawIOBase
import mmap
import os
import pathlib
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Dict,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from urllib.parse import (
urljoin,
urlparse as parse_url,
uses_netloc,
uses_params,
uses_relative,
)
import zipfile
from pandas._typing import FilePathOrBuffer
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import is_file_like
lzma = _import_lzma()
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard("")
if TYPE_CHECKING:
from io import IOBase # noqa: F401
def is_url(url) -> bool:
"""
Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
if not isinstance(url, str):
return False
return parse_url(url).scheme in _VALID_URLS
def _expand_user(
filepath_or_buffer: FilePathOrBuffer[AnyStr],
) -> FilePathOrBuffer[AnyStr]:
"""
Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, str):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
def stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr],
) -> FilePathOrBuffer[AnyStr]:
"""
Attempt to convert a path-like object to a string.
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
str_filepath_or_buffer : maybe a string version of the object
Notes
-----
Objects supporting the fspath protocol (python 3.6+) are coerced
according to its __fspath__ method.
For backwards compatibility with older pythons, pathlib.Path and
py.path objects are specially coerced.
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
if hasattr(filepath_or_buffer, "__fspath__"):
# https://github.com/python/mypy/issues/1424
return filepath_or_buffer.__fspath__() # type: ignore
elif isinstance(filepath_or_buffer, pathlib.Path):
return str(filepath_or_buffer)
return _expand_user(filepath_or_buffer)
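# Illustrative sketch (not part of the original pandas module): a hypothetical
# helper showing that stringify_path coerces path-like objects to strings and
# leaves plain strings without "~" untouched.
def _example_stringify_path_usage():
    assert stringify_path(pathlib.Path("data") / "file.csv") == os.path.join(
        "data", "file.csv"
    )
    assert stringify_path("file.csv") == "file.csv"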
def is_s3_url(url) -> bool:
"""Check for an s3, s3n, or s3a url"""
if not isinstance(url, str):
return False
return parse_url(url).scheme in ["s3", "s3n", "s3a"]
def is_gcs_url(url) -> bool:
"""Check for a gcs url"""
if not isinstance(url, str):
return False
return parse_url(url).scheme in ["gcs", "gs"]
def urlopen(*args, **kwargs):
"""
Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
the stdlib.
"""
import urllib.request
return urllib.request.urlopen(*args, **kwargs)
def get_fs_for_path(filepath: str):
"""
Get appropriate filesystem given a filepath.
Supports s3fs, gcs and local file system.
Parameters
----------
filepath : str
File path. e.g s3://bucket/object, /local/path, gcs://pandas/obj
Returns
-------
s3fs.S3FileSystem, gcsfs.GCSFileSystem, None
Appropriate FileSystem to use. None for local filesystem.
"""
if is_s3_url(filepath):
from pandas.io import s3
return s3.get_fs()
elif is_gcs_url(filepath):
from pandas.io import gcs
return gcs.get_fs()
else:
return None
def get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
encoding: Optional[str] = None,
compression: Optional[str] = None,
mode: Optional[str] = None,
):
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
    compression : {'gzip', 'bz2', 'zip', 'xz', None}, optional
encoding : the encoding to use to decode bytes, default is 'utf-8'
mode : str, optional
Returns
-------
Tuple[FilePathOrBuffer, str, str, bool]
Tuple containing the filepath or buffer, the encoding, the compression
and should_close.
"""
filepath_or_buffer = stringify_path(filepath_or_buffer)
if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
req = urlopen(filepath_or_buffer)
content_encoding = req.headers.get("Content-Encoding", None)
if content_encoding == "gzip":
# Override compression based on Content-Encoding header
compression = "gzip"
reader = BytesIO(req.read())
req.close()
return reader, encoding, compression, True
if is_s3_url(filepath_or_buffer):
from pandas.io import s3
return s3.get_filepath_or_buffer(
filepath_or_buffer, encoding=encoding, compression=compression, mode=mode
)
if is_gcs_url(filepath_or_buffer):
from pandas.io import gcs
return gcs.get_filepath_or_buffer(
filepath_or_buffer, encoding=encoding, compression=compression, mode=mode
)
if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
return _expand_user(filepath_or_buffer), None, compression, False
if not is_file_like(filepath_or_buffer):
msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
raise ValueError(msg)
return filepath_or_buffer, None, compression, False
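# Illustrative sketch (not part of the original pandas module): a hypothetical
# helper showing the pass-through behaviour of get_filepath_or_buffer for a
# plain local path, where no URL, s3 or gcs handling is involved.
def _example_get_filepath_or_buffer_usage():
    buf, _, compression, should_close = get_filepath_or_buffer("data.csv")
    assert buf == "data.csv"
    assert compression is None and should_close is False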
def file_path_to_url(path: str) -> str:
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
# lazify expensive import (~30ms)
from urllib.request import pathname2url
return urljoin("file:", pathname2url(path))
_compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
def get_compression_method(
compression: Optional[Union[str, Mapping[str, str]]]
) -> Tuple[Optional[str], Dict[str, str]]:
"""
Simplifies a compression argument to a compression method string and
a mapping containing additional arguments.
Parameters
----------
compression : str or mapping
If string, specifies the compression method. If mapping, value at key
'method' specifies compression method.
Returns
-------
    tuple of (Optional[str], Dict[str, str])
        The compression method and a dict of additional compression arguments.
Raises
------
ValueError on mapping missing 'method' key
"""
if isinstance(compression, Mapping):
compression_args = dict(compression)
try:
compression = compression_args.pop("method")
except KeyError as err:
raise ValueError("If mapping, compression must have key 'method'") from err
else:
compression_args = {}
return compression, compression_args
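# Illustrative sketch (not part of the original pandas module): a hypothetical
# helper showing how get_compression_method splits a dict argument into the
# method string and the remaining compression options.
def _example_get_compression_method_usage():
    method, args = get_compression_method(
        {"method": "zip", "archive_name": "out.csv"}
    )
    assert method == "zip" and args == {"archive_name": "out.csv"}
    # A bare string is returned unchanged with an empty options dict.
    method, args = get_compression_method("gzip")
    assert method == "gzip" and args == {}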
def infer_compression(
filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]
) -> Optional[str]:
"""
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
filepath_or_buffer : str or file handle
File path or object.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
or '.xz' (otherwise no compression).
Returns
-------
string or None
Raises
------
ValueError on invalid compression specified.
"""
# No compression has been explicitly specified
if compression is None:
return None
# Infer compression
if compression == "infer":
# Convert all path types (e.g. pathlib.Path) to strings
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
# Cannot infer compression of a buffer, assume no compression
return None
# Infer compression from the filename/URL extension
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.endswith(extension):
return compression
return None
# Compression has been specified. Check that it's valid
if compression in _compression_to_extension:
return compression
msg = f"Unrecognized compression type: {compression}"
valid = ["infer", None] + sorted(_compression_to_extension)
msg += f"\nValid compression types are {valid}"
raise ValueError(msg)
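# Illustrative sketch (not part of the original pandas module): a hypothetical
# helper showing how infer_compression resolves the 'infer' sentinel from a
# file extension and passes through explicit compression names.
def _example_infer_compression_usage():
    assert infer_compression("data.csv.gz", compression="infer") == "gzip"
    assert infer_compression("data.csv", compression="infer") is None
    assert infer_compression("data.csv", compression="zip") == "zip"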
def get_handle(
path_or_buf,
mode: str,
encoding=None,
compression: Optional[Union[str, Mapping[str, Any]]] = None,
memory_map: bool = False,
is_text: bool = True,
):
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf : str or file handle
File path or object.
mode : str
Mode to open path_or_buf with.
encoding : str or None
Encoding to use.
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {'infer',
'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{'zip', 'gzip', 'bz2'}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other keys as compression options if compression
mode is 'zip'.
.. versionchanged:: 1.1.0
Passing compression options as keys in dict is now
supported for compression modes 'gzip' and 'bz2' as well as 'zip'.
memory_map : boolean, default False
See parsers._parser_params for more information.
is_text : boolean, default True
whether file/buffer is in text format (csv, json, etc.), or in binary
mode (pickle, etc.).
Returns
-------
f : file-like
A file-like object.
handles : list of file-like objects
A list of file-like object that were opened in this function.
"""
need_text_wrapping: Tuple[Type["IOBase"], ...]
try:
from s3fs import S3File
need_text_wrapping = (BufferedIOBase, RawIOBase, S3File)
except ImportError:
need_text_wrapping = (BufferedIOBase, RawIOBase)
handles: List[IO] = list()
f = path_or_buf
# Convert pathlib.Path/py.path.local or string
path_or_buf = stringify_path(path_or_buf)
is_path = isinstance(path_or_buf, str)
compression, compression_args = get_compression_method(compression)
if is_path:
compression = infer_compression(path_or_buf, compression)
if compression:
# GH33398 the type ignores here seem related to mypy issue #5382;
# it may be possible to remove them once that is resolved.
# GZ Compression
if compression == "gzip":
if is_path:
f = gzip.open(
path_or_buf, mode, **compression_args # type: ignore
)
else:
f = gzip.GzipFile(
fileobj=path_or_buf, **compression_args # type: ignore
)
# BZ Compression
elif compression == "bz2":
if is_path:
f = bz2.BZ2File(
path_or_buf, mode, **compression_args # type: ignore
)
else:
f = bz2.BZ2File(path_or_buf, **compression_args) # type: ignore
# ZIP Compression
elif compression == "zip":
zf = _BytesZipFile(path_or_buf, mode, **compression_args)
# Ensure the container is closed as well.
handles.append(zf)
if zf.mode == "w":
f = zf
elif zf.mode == "r":
zip_names = zf.namelist()
if len(zip_names) == 1:
f = zf.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
"Multiple files found in ZIP file. "
f"Only one file per ZIP: {zip_names}"
)
# XZ Compression
elif compression == "xz":
f = _get_lzma_file(lzma)(path_or_buf, mode)
# Unrecognized Compression
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
handles.append(f)
elif is_path:
if encoding:
# Encoding
f = open(path_or_buf, mode, encoding=encoding, newline="")
elif is_text:
# No explicit encoding
f = open(path_or_buf, mode, errors="replace", newline="")
else:
# Binary mode
f = open(path_or_buf, mode)
handles.append(f)
# Convert BytesIO or file objects passed with an encoding
if is_text and (compression or isinstance(f, need_text_wrapping)):
from io import TextIOWrapper
g = TextIOWrapper(f, encoding=encoding, newline="")
if not isinstance(f, (BufferedIOBase, RawIOBase)):
handles.append(g)
f = g
if memory_map and hasattr(f, "fileno"):
try:
wrapped = _MMapWrapper(f)
f.close()
f = wrapped
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
pass
return f, handles
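# Illustrative sketch (not part of the original pandas module): a hypothetical
# helper showing a plausible calling pattern for get_handle: read from the
# returned file-like object, then close every handle that was opened for you.
def _example_get_handle_usage(path):
    # `path` is assumed to point at an existing, possibly compressed text file.
    f, handles = get_handle(path, "r", encoding="utf-8", compression="infer")
    try:
        first_line = f.readline()
    finally:
        for handle in handles:
            handle.close()
    return first_line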
class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore
"""
Wrapper for standard library class ZipFile and allow the returned file-like
handle to accept byte strings via `write` method.
BytesIO provides attributes of file-like object and ZipFile.writestr writes
bytes strings into a member of the archive.
"""
# GH 17778
def __init__(
self,
file: FilePathOrBuffer,
mode: str,
archive_name: Optional[str] = None,
**kwargs,
):
if mode in ["wb", "rb"]:
mode = mode.replace("b", "")
self.archive_name = archive_name
super().__init__(file, mode, zipfile.ZIP_DEFLATED, **kwargs)
def write(self, data):
archive_name = self.filename
if self.archive_name is not None:
archive_name = self.archive_name
super().writestr(archive_name, data)
@property
def closed(self):
return self.fp is None
class _MMapWrapper(abc.Iterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
Parameters
----------
f : file object
File object to be mapped onto memory. Must support the 'fileno'
method or have an equivalent attribute
"""
def __init__(self, f: IO):
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
return getattr(self.mmap, name)
def __iter__(self) -> "_MMapWrapper":
return self
def __next__(self) -> str:
newbytes = self.mmap.readline()
# readline returns bytes, not str, but Python's CSV reader
# expects str, so convert the output to str before continuing
newline = newbytes.decode("utf-8")
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == "":
raise StopIteration
return newline
| bsd-3-clause |
memo/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 51 | 12969 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(
expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(expected_values,
col,
actual_dict[col]))
class TensorFlowDataFrameTestCase(test.TestCase):
"""Tests for `TensorFlowDataFrame`."""
def _make_test_csv(self):
f = tempfile.NamedTemporaryFile(
dir=self.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
intvalue = np.random.randint(-10, 10)
floatvalue = np.random.rand()
boolvalue = int(np.random.rand() > 0.3)
stringvalue = "S: %.4f" % np.random.rand()
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_csv_sparse(self):
f = tempfile.NamedTemporaryFile(
dir=self.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
# leave columns empty; these will be read as default value (e.g. 0 or NaN)
intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
stringvalue = (
("S: %.4f" % np.random.rand()) if np.random.rand() > 0.5 else "")
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_tfrecord(self):
f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir(), delete=False)
w = tf_record.TFRecordWriter(f.name)
for i in range(100):
ex = example_pb2.Example()
ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
ex.features.feature["fixed_len_float"].float_list.value.extend(
[float(i), 2 * float(i)])
w.write(ex.SerializeToString())
return f.name
def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
num_batches, batch_size):
self.assertItemsEqual(
list(pandas_df.columns) + ["index"], tensorflow_df.columns())
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
row_numbers = [
total_row_num % pandas_df.shape[0]
for total_row_num in range(batch_size * batch_num, batch_size * (
batch_num + 1))
]
expected_df = pandas_df.iloc[row_numbers]
_assert_df_equals_dict(expected_df, batch)
def testInitFromPandas(self):
"""Test construction from Pandas DataFrame."""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(
pandas_df, batch_size=10, shuffle=False)
batch = tensorflow_df.run_one_batch()
np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
"Expected index {}; got {}".format(
pandas_df.index.values, batch["index"]))
_assert_df_equals_dict(pandas_df, batch)
def testBatch(self):
"""Tests `batch` method.
`DataFrame.batch()` should iterate through the rows of the
`pandas.DataFrame`, and should "wrap around" when it reaches the last row.
"""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({
"albatross": range(10),
"bluejay": 1,
"cockatoo": range(0, 20, 2),
"penguin": list("abcdefghij")
})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
# Rebatch `df` into the following sizes successively.
batch_sizes = [4, 7]
num_batches = 3
final_batch_size = batch_sizes[-1]
for batch_size in batch_sizes:
tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=final_batch_size)
def testFromNumpy(self):
x = np.eye(20)
tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
for batch in tensorflow_df.run(30):
for ind, val in zip(batch["index"], batch["value"]):
expected_val = np.zeros_like(val)
expected_val[ind] = 1
np.testing.assert_array_equal(expected_val, val)
def testFromCSV(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
enqueue_size = 7
data_path = self._make_test_csv()
default_values = [0, 0.0, 0, ""]
pandas_df = pd.read_csv(data_path)
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
enqueue_size=enqueue_size,
batch_size=batch_size,
shuffle=False,
default_values=default_values)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromCSVLimitEpoch(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = self._make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
actual_num_batches = len(result_batches)
self.assertEqual(expected_num_batches, actual_num_batches)
# TODO(soergel): figure out how to dequeue the final small batch
expected_rows = 1696 # num_epochs * 100
actual_rows = sum([len(x["int"]) for x in result_batches])
self.assertEqual(expected_rows, actual_rows)
def testFromCSVWithFeatureSpec(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
data_path = self._make_test_csv_sparse()
feature_spec = {
"int": parsing_ops.FixedLenFeature(None, dtypes.int16, np.nan),
"float": parsing_ops.VarLenFeature(dtypes.float16),
"bool": parsing_ops.VarLenFeature(dtypes.bool),
"string": parsing_ops.FixedLenFeature(None, dtypes.string, "")
}
pandas_df = pd.read_csv(data_path, dtype={"string": object})
# Pandas insanely uses NaN for empty cells in a string column.
# And, we can't use Pandas replace() to fix them because nan != nan
s = pandas_df["string"]
for i in range(0, len(s)):
if isinstance(s[i], float) and math.isnan(s[i]):
pandas_df.set_value(i, "string", "")
tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
[data_path],
batch_size=batch_size,
shuffle=False,
feature_spec=feature_spec)
# These columns were sparse; re-densify them for comparison
tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromExamples(self):
num_batches = 77
enqueue_size = 11
batch_size = 13
data_path = self._make_test_tfrecord()
features = {
"fixed_len_float":
parsing_ops.FixedLenFeature(
shape=[2], dtype=dtypes.float32, default_value=[0.0, 0.0]),
"var_len_int":
parsing_ops.VarLenFeature(dtype=dtypes.int64)
}
tensorflow_df = df.TensorFlowDataFrame.from_examples(
data_path,
enqueue_size=enqueue_size,
batch_size=batch_size,
features=features,
shuffle=False)
# `test.tfrecord` contains 100 records with two features: var_len_int and
# fixed_len_float. Entry n contains `range(n % 3)` and
# `float(n)` for var_len_int and fixed_len_float,
# respectively.
num_records = 100
def _expected_fixed_len_float(n):
return np.array([float(n), 2 * float(n)])
def _expected_var_len_int(n):
return np.arange(n % 3)
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
record_numbers = [
n % num_records
for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
]
for i, j in enumerate(record_numbers):
np.testing.assert_allclose(
_expected_fixed_len_float(j), batch["fixed_len_float"][i])
var_len_int = batch["var_len_int"]
for i, ind in enumerate(var_len_int.indices):
val = var_len_int.values[i]
expected_row = _expected_var_len_int(record_numbers[ind[0]])
expected_value = expected_row[ind[1]]
np.testing.assert_array_equal(expected_value, val)
def testSplitString(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = self._make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
a, b = tensorflow_df.split("string", 0.7) # no rebatching
total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
a_result_batches = list(a.run(num_epochs=num_epochs))
b_result_batches = list(b.run(num_epochs=num_epochs))
self.assertEqual(expected_num_batches, len(total_result_batches))
self.assertEqual(expected_num_batches, len(a_result_batches))
self.assertEqual(expected_num_batches, len(b_result_batches))
total_rows = sum([len(x["int"]) for x in total_result_batches])
a_total_rows = sum([len(x["int"]) for x in a_result_batches])
b_total_rows = sum([len(x["int"]) for x in b_result_batches])
print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
# TODO(soergel): figure out how to dequeue the final small batch
expected_total_rows = 1696 # (num_epochs * 100)
self.assertEqual(expected_total_rows, total_rows)
self.assertEqual(1087, a_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.7), a_total_rows)
self.assertEqual(609, b_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.3), b_total_rows)
# The strings used for hashing were all unique in the original data, but
# we ran 17 epochs, so each one should appear 17 times. Each copy should
# be hashed into the same partition, so there should be no overlap of the
# keys.
a_strings = set([s for x in a_result_batches for s in x["string"]])
b_strings = set([s for x in b_result_batches for s in x["string"]])
self.assertEqual(frozenset(), a_strings & b_strings)
if __name__ == "__main__":
test.main()
| apache-2.0 |
bnaul/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 3 | 19784 | import numpy as np
from scipy import sparse
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_raises_regexp
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model._ransac import _dynamic_max_trials
from sklearn.exceptions import ConvergenceWarning
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
rng = np.random.RandomState(1000)
outliers = np.unique(rng.randint(len(X), size=200))
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert X.shape[0] == 2
assert y.shape[0] == 2
return False
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
y = rng.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert X.shape[0] == 2
assert y.shape[0] == 2
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
    # there is a 1e-9 chance it will take this many trials. No good reason
    # 1e-2 isn't enough, can still happen
    # 2 is what RANSAC defines as min_samples = X.shape[1] + 1
max_trials = _dynamic_max_trials(
len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2)
for i in range(50):
ransac_estimator.set_params(min_samples=2, random_state=i)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ < max_trials + 1
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert ransac_estimator.score(X[2:], y[2:]) == 1
assert ransac_estimator.score(X[:2], y[:2]) < 1
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_array_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert ransac_estimator.n_skips_no_inliers_ == 5
assert ransac_estimator.n_skips_invalid_data_ == 0
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_no_valid_data():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 5
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_no_valid_model():
def is_model_valid(estimator, X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_model_valid=is_model_valid,
max_trials=5)
msg = ("RANSAC could not find a valid consensus set")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 0
assert ransac_estimator.n_skips_invalid_model_ == 5
def test_ransac_exceed_max_skips():
def is_data_valid(X, y):
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_trials=5,
max_skips=3)
msg = ("RANSAC skipped more iterations than `max_skips`")
assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 4
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_warn_exceed_max_skips():
global cause_skip
cause_skip = False
def is_data_valid(X, y):
global cause_skip
if not cause_skip:
cause_skip = True
return True
else:
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator,
is_data_valid=is_data_valid,
max_skips=3,
max_trials=5)
assert_warns(ConvergenceWarning, ransac_estimator.fit, X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 4
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, min_samples=2,
residual_threshold=5,
random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
    # e = 0%, min_samples = 2
assert _dynamic_max_trials(100, 100, 2, 0.99) == 1
# e = 5%, min_samples = 2
assert _dynamic_max_trials(95, 100, 2, 0.99) == 2
# e = 10%, min_samples = 2
assert _dynamic_max_trials(90, 100, 2, 0.99) == 3
# e = 30%, min_samples = 2
assert _dynamic_max_trials(70, 100, 2, 0.99) == 7
# e = 50%, min_samples = 2
assert _dynamic_max_trials(50, 100, 2, 0.99) == 17
# e = 5%, min_samples = 8
assert _dynamic_max_trials(95, 100, 8, 0.99) == 5
# e = 10%, min_samples = 8
assert _dynamic_max_trials(90, 100, 8, 0.99) == 9
# e = 30%, min_samples = 8
assert _dynamic_max_trials(70, 100, 8, 0.99) == 78
# e = 50%, min_samples = 8
assert _dynamic_max_trials(50, 100, 8, 0.99) == 1177
# e = 0%, min_samples = 10
assert _dynamic_max_trials(1, 100, 10, 0) == 0
assert _dynamic_max_trials(1, 100, 10, 1) == float('inf')
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
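# Illustrative sketch (not part of the original scikit-learn test suite): the
# hand-calculated values above follow N = ceil(log(1 - p) / log(1 - w ** m))
# for inlier ratio w, minimum sample size m and success probability p.
def _example_dynamic_max_trials_formula():
    w, m, p = 0.5, 2, 0.99
    expected = int(np.ceil(np.log(1 - p) / np.log(1 - w ** m)))
    assert expected == 17
    assert _dynamic_max_trials(50, 100, 2, 0.99) == expected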
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert ransac_estimator.inlier_mask_.shape[0] == n_samples
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = OrthogonalMatchingPursuit()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
def test_ransac_final_model_fit_sample_weight():
X, y = make_regression(n_samples=1000, random_state=10)
rng = check_random_state(42)
sample_weight = rng.randint(1, 4, size=y.shape[0])
sample_weight = sample_weight / sample_weight.sum()
ransac = RANSACRegressor(base_estimator=LinearRegression(), random_state=0)
ransac.fit(X, y, sample_weight=sample_weight)
final_model = LinearRegression()
mask_samples = ransac.inlier_mask_
final_model.fit(
X[mask_samples], y[mask_samples],
sample_weight=sample_weight[mask_samples]
)
assert_allclose(ransac.estimator_.coef_, final_model.coef_)
| bsd-3-clause |
hitszxp/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 44 | 7663 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
"""Compute score for random uniform cluster labelings"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
"""Check that adjusted scores are almost zero on random labels"""
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
"""Compute the Adjusted Mutual Information and test against known values"""
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
"""Check numerical stability when information is exactly zero"""
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
"""Check relation between v_measure, entropy and mutual information"""
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
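# Illustrative sketch (not part of the original scikit-learn test suite): a
# hypothetical helper spelling out the identity checked above on a tiny
# labelling, V = 2 * MI / (H(labels_a) + H(labels_b)).
def _example_v_measure_identity():
    labels = [0, 0, 1, 1]
    mi = mutual_info_score(labels, labels)
    assert_almost_equal(v_measure_score(labels, labels),
                        2.0 * mi / (2 * entropy(labels)))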
| bsd-3-clause |
cjayb/kingjr_natmeg_arhus | JR_toolbox/_skl_king_parallel.py | 2 | 12727 |
print("######################################################################")
print("# Parallel n-split k-stratified-fold continuous SVM Scikitlearn MVPA #")
print("# (c) Jean-Remi King 2012, jeanremi.king [at] gmail [dot] com #")
print("######################################################################")
# Implementation of a multivariate pattern analysis based on the scikit-learn
# toolbox (http://scikit-learn.org/stable/). It reads two .mat files
# (filenameX, filenamey) created by 'jr_classify.m'
#
# Function:
# skl_king_parallel.py filenameX filenamey [number_of_cores]
#
# Inputs:
# in filenameX:
# Xm: samples x features x classification matrix (e.g. trials x
# chans x time)
# in filenamey:
# y: vector indicating the class of each sample. Negative values
# will be used for generalization only. 0 indicates to-be-
# ignored samples.
# y2: cost/weights applied on each sample
# path: export directory
# nameX: export filename X
# namey: export filename y
# folding:type of folding(e.g. stratified)
# n_splits:number of splits
# n_folds: number of folds
# C: SVM penalization parameter
# compute_probas: compute logit fit
# compute_predict: compute traditional SVM
# fs_n: number of univariate features selected for classification
# dims: classification performed on dims dimensions
# dims_tg:classification generalized on dims_tg dimensions
#
# Ouputs:
# predict: prediction matrix (split x samples x dims x dimsg)
# predictg:same as predict for generalized samples
# probas: probas matrix (split x samples x dims x dimsg x class)
# probasg: same as probas for generalized samples
# coef: weight hyperplan vector
# all_folds:folding report (split x fold x samples)
# y_all: original y
# y: training y
# yg: generalized y
# filenameX:
# filenamey:
#
# Results are reported in: path + nameX + '_' + namey + "_results.mat"
###############################################################################
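# Example invocation (illustrative only; the .mat file names are hypothetical
# and are expected to have been produced beforehand by jr_classify.m):
#
#   python skl_king_parallel.py subject01_X.mat subject01_y.mat 8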
# (c) Jean-Remi King: jeanremi.king [at] gmail [dot] com
###############################################################################
# update 2012 10 18: correct python/matlab dim incompatibility
# update 2012 10 18: correct error fs between 99 and 100 && remove Kbest
# update 2012 10 17: correct error n_features shape and add nice
# update 2012 10 01: correct prediction error+change loading results option
# update 2012 09 14: handle fs float error
# update 2012 09 14: pass n_cores to sys.argv
# version 2012 09 13: implementation of parallelization
###############################################################################
print("LIBRARY")
import sys as sys
import numpy as np
from scipy import stats
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
from sklearn.externals.joblib import Parallel, delayed
import scipy.io as sio
from sklearn.preprocessing import Scaler
###############################################################################
print("INPUT DATA")
#-- get argument to load specific file
filenameX = str(sys.argv[1])
filenamey = str(sys.argv[2])
if len(sys.argv) <= 3:
n_cores = -1
else:
n_cores = int(sys.argv[3])
print("cores: " + str(n_cores))
print(filenameX)
print(filenamey)
#-- Load data into python
mat = sio.loadmat(filenameX,squeeze_me=False)
Xm_all = mat["Xm"] # data
#-- load classification parameters
mat = sio.loadmat(filenamey, squeeze_me=False)
dims = mat["dims"] # select time windows to compute
dims = np.reshape(dims, dims.size) - 1 # reshape for skl compatibility
dims_tg = mat["dims_tg"] - 1 # svm penalization parameter
mat = sio.loadmat(filenamey, squeeze_me=True)
path = mat["path"]
nameX = mat["nameX"]
namey = mat["namey"]
folding = mat["folding"]
n_splits = mat["n_splits"] # svm penalization parameter
n_folds = mat["n_folds"] # fold number
svm_C = mat["C"] # svm penalization parameter
compute_probas = mat["compute_probas"] # svm penalization parameter
compute_predict = mat["compute_predict"] # svm penalization parameter
fs_n = mat["fs"] # feature selection
y_all = mat["y"] # class used for train and test
print(Xm_all.shape)
print(y_all.shape)
y2_all = mat["y2"] # class used for sample weights
#-- build training and generalizing classes
Xm = Xm_all[y_all > 0, :, :] # training categories
Xmg = Xm_all[y_all < 0, :, :] # generalization categories
y = y_all[y_all > 0]
yg = y_all[y_all < 0]
y2 = y2_all[y_all > 0]
n_samples, n_features, unused = Xm.shape
n_samplesg, unused, unused = Xmg.shape
n_featuresg = n_features
n_dims = dims.shape[0]
n_dimsg = n_dims
n_dims_tg = dims_tg.shape[1]
n_dimsg_tg = dims_tg.shape[1]
n_classes = np.unique(y).shape[0]
#deal with sample_weight
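# (inverse-frequency weighting: each class listed in y2 contributes equally to the fit)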
sample_weight = np.ones(y.shape[0])
classes = np.unique(y2)
for c in range(classes.shape[0]):
sample_weight[y2 == classes[c]] = 1. / (np.sum(y2 == classes[c]))
###############################################################################
print("PREPARE CLASSIFICATION")
#--crossvalidation
if folding == 'stratified':
cv = StratifiedKFold(y, k=n_folds)
elif folding == 'kfolding':
cv = KFold(n=y.shape[0], k=n_folds)
elif folding == 'leaveoneout':
n_folds[0] = y.shape[0]
cv = LeaveOneOut(n=y.shape[0])
else:
print("unknown crossvalidation method!")
#-- classifier
clf = svm.SVC(kernel='linear', probability=True, C=svm_C)
#-- normalizer
scaler = Scaler()
#-- feature selection
fs_n = round(n_features * fs_n) / n_features
if fs_n == 100.00:
fs = SelectKBest(f_classif, k=n_features)
else:
if fs_n > 99 and fs_n < 101:
fs = SelectPercentile(f_classif, percentile=fs_n)
else:
fs = SelectPercentile(f_classif, percentile=fs_n/n_features)
#-- results initialization
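# (note: `np.zeros(shape) ** np.nan` yields a NaN-filled array because 0**nan is nan,
#  while `np.empty(shape) ** 0` yields an array of ones because x**0 is 1)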
if compute_predict:
predict = np.zeros([n_splits, n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
probas = []
probasg = []
coef = np.empty([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
all_folds = np.zeros([n_splits, n_folds, n_samples]) ** np.nan
y_shfl = np.copy(y)
Xm_shfl = np.copy(Xm)
sw_shfl = np.copy(sample_weight)
###############################################################################
#-- Define parallel cross validation
def my_pipeline(train, test,
Xm_shfl, y_shfl, sw_shfl, Xmg,
dims, fs, scaler, clf,
n_samples, n_dims, n_dims_tg, n_classes):
# indicate opened fold
sys.stdout.write("<")
sys.stdout.flush()
# initialize results within a given fold
if compute_predict:
predict = np.zeros([n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg, n_classes]) ** np.nan
else:
probas = []
probasg = []
coef = np.empty([n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
# apply different classification along dimension 0
for d in range(0, dims.shape[0]):
Xtrain = Xm_shfl[train, :, dims[d]]
ytrain = y_shfl[train]
sw_train = sw_shfl[train]
# (deal with NaN samples in training)
ytrain = ytrain[~np.isnan(np.nansum(Xtrain, axis=1))]
sw_train = sw_train[~np.isnan(np.nansum(Xtrain, axis=1))]
Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :]
if np.unique(ytrain).shape[0] > 1:
# feature selection
fs.fit(Xtrain, ytrain)
Xtrain = fs.transform(Xtrain)
# normalization
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
# SVM fit
clf.fit(Xtrain, ytrain, sample_weight=sw_train)
# retrieve features selected during univariate selection
uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile)
            # retrieve hyperplane (unselected features set to 0)
coef[d, :, uni_features] = scaler.inverse_transform(clf.coef_).T
# generalize across all time points
for d_tg in range(0, n_dims_tg):
# select data
Xtest = Xm_shfl[test, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtest, axis=1))
Xtest = Xtest[~test_nan, :]
# feature selection from training
Xtest = fs.transform(Xtest)
# normalize from training
Xtest = scaler.transform(Xtest)
# generalize test samples
if (Xtest.shape[0] - np.sum(test_nan)) > 0:
if compute_predict:
predict[test[~test_nan], d, d_tg] = clf.predict(Xtest)
if compute_probas:
probas[test[~test_nan], d, d_tg, :] = clf.predict_proba(Xtest)
# predict on generalization sample
# select data
Xtestg = Xmg[:, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtestg, axis=1))
if (Xtestg.shape[0] - np.sum(test_nan)) > 0:
Xtestg = Xtestg[~test_nan, :]
# preproc feature selection and normalization
Xtestg = fs.transform(Xtestg)
Xtestg = scaler.transform(Xtestg)
# compute prediction
if compute_predict:
predictg[~test_nan, d, d_tg] = clf.predict(Xtestg)
if compute_probas:
probasg[~test_nan, d, d_tg, :] = clf.predict_proba(Xtestg)
# summarize fold results
out = {
'coef': coef,
'predict': predict,
'predictg': predictg,
'probas': probas,
'probasg': probasg}
# indicate end of fold
sys.stdout.write(">")
sys.stdout.flush()
return out
###############################################################################
print("CLASSIFY")
#-- Shuffle split
for split in range(n_splits):
print("split " + str(split))
# shuffle order in case this is not the first split
new_order = np.array(range(y.shape[0]))
if split > 0:
np.random.shuffle(new_order)
y_shfl[new_order] = np.copy(y)
Xm_shfl[new_order, :, :] = np.copy(Xm)
sw_shfl[new_order] = np.copy(sample_weight)
cv = StratifiedKFold(y_shfl, k=n_folds)
# Cross-validation computed in parallel
# run parallel computation
out = Parallel(n_jobs=n_cores)(delayed(my_pipeline)(
train=train,
test=test,
Xm_shfl=Xm_shfl,
y_shfl=y_shfl,
sw_shfl=sw_shfl,
Xmg=Xmg,
dims=dims,
fs=fs,
scaler=scaler,
clf=clf,
n_samples=n_samples,
n_dims=n_dims,
n_dims_tg=n_dims_tg,
n_classes=n_classes) for train, test in cv)
# reorder results folds and splits
for fold, (train, test) in enumerate(cv):
all_folds[split, fold, train] = 1
all_folds[split, fold, test] = 0
coef[split, fold, :, :, :] = out[fold]['coef']
if compute_predict:
predict[split, test, :, :] = out[fold]['predict'][new_order[test], :, :]
predictg[split, :, :, :, fold] = out[fold]['predictg']
if compute_probas:
probas[split, test, :, :, :] = out[fold]['probas'][new_order[test], :, :, :]
probasg[split, :, :, :, :, fold] = out[fold]['probasg']
all_folds[split, :, :] = all_folds[split, :, new_order].T
###############################################################################
print("EXPORT DATA")
mat['predict'] = predict
mat['predictg'] = predictg
mat['probas'] = probas
mat['probasg'] = probasg
mat['coef'] = coef
mat['all_folds'] = all_folds
mat['y_all'] = y_all
mat['y'] = y
mat['yg'] = yg
mat['filenameX'] = filenameX
mat['filenamey'] = filenamey
output = path + nameX + '_' + namey + "_results.mat"
print(output)
sio.savemat(output, mat)
| bsd-3-clause |
alexsavio/scikit-learn | examples/model_selection/grid_search_digits.py | 33 | 2764 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images and
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
montoyjh/pymatgen | pymatgen/io/abinit/nodes.py | 1 | 43285 | # coding: utf-8
"""
This module defines the Node class that is inherited by Task, Work and Flow objects.
"""
import sys
import os
import time
import collections
import abc
import numpy as np
from pprint import pprint
from pymatgen.util.io_utils import AtomicFile
from pydispatch import dispatcher
from monty.termcolor import colored
from monty.serialization import loadfn
from monty.string import is_string
from monty.io import FileLock
from monty.collections import AttrDict, Namespace
from monty.functools import lazy_property
from monty.json import MSONable
from pymatgen.util.serialization import json_pretty_dump, pmg_serialize
from .utils import File, Directory, Dirviz, irdvars_for_ext, abi_extensions
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
def _2attrs(item):
    return item if item is None or isinstance(item, (list, tuple)) else (item,)
class Status(int):
"""This object is an integer representing the status of the `Node`."""
    # Possible status of the node. See monty.termcolor for the meaning of color, on_color and attrs.
_STATUS_INFO = [
#(value, name, color, on_color, attrs)
(1, "Initialized", None , None, None), # Node has been initialized
(2, "Locked", "grey" , None, None), # Task is locked an must be explicitly unlocked by an external subject (Work).
(3, "Ready", None , None, None), # Node is ready i.e. all the depencies of the node have status S_OK
(4, "Submitted", "blue" , None, None), # Node has been submitted (The `Task` is running or we have started to finalize the Work)
(5, "Running", "magenta", None, None), # Node is running.
(6, "Done", None , None, None), # Node done, This does not imply that results are ok or that the calculation completed successfully
(7, "AbiCritical", "red" , None, None), # Node raised an Error by ABINIT.
(8, "QCritical", "red" , "on_white", None), # Node raised an Error by submitting submission script, or by executing it
(9, "Unconverged", "red" , "on_yellow", None), # This usually means that an iterative algorithm didn't converge.
(10, "Error", "red" , None, None), # Node raised an unrecoverable error, usually raised when an attempt to fix one of other types failed.
(11, "Completed", "green" , None, None), # Execution completed successfully.
]
_STATUS2STR = collections.OrderedDict([(t[0], t[1]) for t in _STATUS_INFO])
_STATUS2COLOR_OPTS = collections.OrderedDict([(t[0], {"color": t[2], "on_color": t[3], "attrs": _2attrs(t[4])}) for t in _STATUS_INFO])
def __repr__(self):
return "<%s: %s, at %s>" % (self.__class__.__name__, str(self), id(self))
def __str__(self):
"""String representation."""
return self._STATUS2STR[self]
@classmethod
def as_status(cls, obj):
"""Convert obj into Status."""
if obj is None: return None
return obj if isinstance(obj, cls) else cls.from_string(obj)
@classmethod
def from_string(cls, s):
"""Return a `Status` instance from its string representation."""
for num, text in cls._STATUS2STR.items():
if text == s:
return cls(num)
else:
raise ValueError("Wrong string %s" % s)
@classmethod
def all_status_strings(cls):
"""List of strings with all possible values status."""
return [info[1] for info in cls._STATUS_INFO]
@property
def is_critical(self):
"""True if status is critical."""
return str(self) in ("AbiCritical", "QCritical", "Unconverged", "Error")
@property
def color_opts(self):
return self._STATUS2COLOR_OPTS[self]
@property
def colored(self):
"""Return colorized text used to print the status if the stream supports it."""
return colored(str(self), **self.color_opts)
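    # A minimal usage sketch of Status (values shown are illustrative):
    #   s = Status.from_string("Running")   # -> Status(5)
    #   s.is_critical                       # -> False
    #   print(Status(11).colored)           # "Completed", colorized if the stream supports it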
class Dependency:
"""
This object describes the dependencies among the nodes of a calculation.
A `Dependency` consists of a `Node` that produces a list of products (files)
that are used by the other nodes (`Task` or `Work`) to start the calculation.
One usually creates the object by calling work.register
Example:
# Register the SCF task in work.
scf_task = work.register(scf_strategy)
# Register the NSCF calculation and its dependency on the SCF run via deps.
nscf_task = work.register(nscf_strategy, deps={scf_task: "DEN"})
"""
def __init__(self, node, exts=None):
"""
Args:
            node: The task or the workflow associated to the dependency, or a string with a filepath.
exts: Extensions of the output files that are needed for running the other tasks.
"""
self._node = Node.as_node(node)
if exts and is_string(exts): exts = exts.split()
# Extract extensions.
self.exts = [e for e in exts if not e.startswith("@")]
# Save getters
self.getters = [e for e in exts if e.startswith("@")]
#if self.getters: print(self.getters)
def __hash__(self):
return hash(self._node)
def __repr__(self):
return "node %s will produce: %s " % (repr(self.node), repr(self.exts))
def __str__(self):
return "node %s will produce: %s " % (str(self.node), str(self.exts))
@property
def info(self):
return str(self.node)
@property
def node(self):
"""The :class:`Node` associated to the dependency."""
return self._node
@property
def status(self):
"""The status of the dependency, i.e. the status of the :class:`Node`."""
return self.node.status
@lazy_property
def products(self):
"""List of output files produces by self."""
_products = []
for ext in self.exts:
prod = Product(ext, self.node.opath_from_ext(ext))
_products.append(prod)
return _products
def apply_getters(self, task):
"""
This function is called when we specify the task dependencies with the syntax:
deps={node: "@property"}
In this case the task has to the get `property` from `node` before starting the calculation.
At present, the following properties are supported:
- @structure
"""
if not self.getters: return
for getter in self.getters:
if getter == "@structure":
task.history.info("Getting structure from %s" % self.node)
new_structure = self.node.get_final_structure()
task._change_structure(new_structure)
else:
raise ValueError("Wrong getter %s" % getter)
def connecting_vars(self):
"""
Returns a dictionary with the variables that must be added to the
input file in order to connect this :class:`Node` to its dependencies.
"""
vars = {}
for prod in self.products:
vars.update(prod.connecting_vars())
return vars
def get_filepaths_and_exts(self):
"""Returns the paths of the output files produced by self and its extensions"""
filepaths = [prod.filepath for prod in self.products]
exts = [prod.ext for prod in self.products]
return filepaths, exts
class Product:
"""
A product represents an output file produced by ABINIT instance.
This file is needed to start another `Task` or another `Work`.
"""
def __init__(self, ext, path):
"""
Args:
ext: ABINIT file extension
            path: (absolute) filepath
"""
if ext not in abi_extensions():
raise ValueError("Extension %s has not been registered in the internal database" % str(ext))
self.ext = ext
self.file = File(path)
@classmethod
def from_file(cls, filepath):
"""Build a :class:`Product` instance from a filepath."""
# Find the abinit extension.
for i in range(len(filepath)):
if filepath[i:] in abi_extensions():
ext = filepath[i:]
break
else:
raise ValueError("Cannot detect abinit extension in %s" % filepath)
return cls(ext, filepath)
def __str__(self):
return "File=%s, Extension=%s, " % (self.file.path, self.ext)
@property
def filepath(self):
"""Absolute path of the file."""
return self.file.path
def connecting_vars(self):
"""
Returns a dictionary with the ABINIT variables that
must be used to make the code use this file.
"""
return irdvars_for_ext(self.ext)
class GridFsFile(AttrDict):
"""Information on a file that will stored in the MongoDb gridfs collection."""
def __init__(self, path, fs_id=None, mode="b"):
super(GridFsFile, self).__init__(path=path, fs_id=fs_id, mode=mode)
class NodeResults(dict, MSONable):
"""Dictionary used to store the most important results produced by a :class:`Node`."""
JSON_SCHEMA = {
"type": "object",
"properties": {
"node_id": {"type": "integer", "required": True},
"node_finalized": {"type": "boolean", "required": True},
"node_history": {"type": "array", "required": True},
"node_class": {"type": "string", "required": True},
"node_name": {"type": "string", "required": True},
"node_status": {"type": "string", "required": True},
"in": {"type": "object", "required": True, "description": "dictionary with input parameters"},
"out": {"type": "object", "required": True, "description": "dictionary with the output results"},
"exceptions": {"type": "array", "required": True},
"files": {"type": "object", "required": True},
},
}
@classmethod
def from_node(cls, node):
"""Initialize an instance of `NodeResults` from a `Node` subclass."""
kwargs = dict(
node_id=node.node_id,
node_finalized=node.finalized,
node_history=list(node.history),
node_name=node.name,
node_class=node.__class__.__name__,
node_status=str(node.status),
)
return node.Results(node, **kwargs)
def __init__(self, node, **kwargs):
super(NodeResults, self).__init__(**kwargs)
self.node = node
if "in" not in self: self["in"] = Namespace()
if "out" not in self: self["out"] = Namespace()
if "exceptions" not in self: self["exceptions"] = []
if "files" not in self: self["files"] = Namespace()
@property
def exceptions(self):
return self["exceptions"]
@property
def gridfs_files(self):
"""List with the absolute paths of the files to be put in GridFs."""
return self["files"]
def register_gridfs_files(self, **kwargs):
"""
This function registers the files that will be saved in GridFS.
kwargs is a dictionary mapping the key associated to the file (usually the extension)
to the absolute path. By default, files are assumed to be in binary form, for formatted files
one should pass a tuple ("filepath", "t").
Example::
results.register_gridfs(GSR="path/to/GSR.nc", text_file=("/path/to/txt_file", "t"))
The GSR file is a binary file, whereas text_file is a text file.
"""
d = {}
for k, v in kwargs.items():
mode = "b"
if isinstance(v, (list, tuple)): v, mode = v
d[k] = GridFsFile(path=v, mode=mode)
self["files"].update(d)
return self
def push_exceptions(self, *exceptions):
for exc in exceptions:
newstr = str(exc)
if newstr not in self.exceptions:
self["exceptions"] += [newstr,]
@pmg_serialize
def as_dict(self):
return self.copy()
@classmethod
def from_dict(cls, d):
return cls({k: v for k, v in d.items() if k not in ("@module", "@class")})
def json_dump(self, filename):
json_pretty_dump(self.as_dict(), filename)
@classmethod
def json_load(cls, filename):
return cls.from_dict(loadfn(filename))
def validate_json_schema(self):
import validictory
d = self.as_dict()
try:
validictory.validate(d, self.JSON_SCHEMA)
return True
except ValueError as exc:
pprint(d)
print(exc)
return False
def update_collection(self, collection):
"""
Update a mongodb collection.
"""
node = self.node
flow = node if node.is_flow else node.flow
# Build the key used to store the entry in the document.
key = node.name
if node.is_task:
key = "w" + str(node.pos[0]) + "_t" + str(node.pos[1])
elif node.is_work:
key = "w" + str(node.pos)
db = collection.database
# Save files with GridFs first in order to get the ID.
if self.gridfs_files:
import gridfs
fs = gridfs.GridFS(db)
for ext, gridfile in self.gridfs_files.items():
logger.info("gridfs: about to put file:", str(gridfile))
# Here we set gridfile.fs_id that will be stored in the mondodb document
try:
with open(gridfile.path, "r" + gridfile.mode) as f:
gridfile.fs_id = fs.put(f, filename=gridfile.path)
except IOError as exc:
logger.critical(str(exc))
if flow.mongo_id is None:
# Flow does not have a mongo_id, allocate doc for the flow and save its id.
flow.mongo_id = collection.insert({})
print("Creating flow.mongo_id", flow.mongo_id, type(flow.mongo_id))
# Get the document from flow.mongo_id and update it.
doc = collection.find_one({"_id": flow.mongo_id})
if key in doc:
raise ValueError("%s is already in doc!" % key)
doc[key] = self.as_dict()
collection.save(doc)
#collection.update({'_id':mongo_id}, {"$set": doc}, upsert=False)
def check_spectator(node_method):
"""
Decorator for :class:`Node` methods. Raise `SpectatorNodeError`.
"""
from functools import wraps
@wraps(node_method)
def wrapper(*args, **kwargs):
node = args[0]
if node.in_spectator_mode:
#raise node.SpectatorError("You should not call this method when the node in spectator_mode")
#warnings.warn("You should not call %s when the node in spectator_mode" % node_method)
import warnings
return node_method(*args, **kwargs)
return wrapper
class NodeError(Exception):
"""Base Exception raised by :class:`Node` subclasses"""
class SpectatorNodeError(NodeError):
"""
Exception raised by :class:`Node` methods when the node is in spectator mode
and we are calling a method with side effects.
"""
class Node(metaclass=abc.ABCMeta):
"""
Abstract base class defining the interface that must be
implemented by the nodes of the calculation.
Nodes are hashable and can be tested for equality
"""
Results = NodeResults
Error = NodeError
SpectatorError = SpectatorNodeError
# Possible status of the node.
S_INIT = Status.from_string("Initialized")
S_LOCKED = Status.from_string("Locked")
S_READY = Status.from_string("Ready")
S_SUB = Status.from_string("Submitted")
S_RUN = Status.from_string("Running")
S_DONE = Status.from_string("Done")
S_ABICRITICAL = Status.from_string("AbiCritical")
S_QCRITICAL = Status.from_string("QCritical")
S_UNCONVERGED = Status.from_string("Unconverged")
#S_CANCELLED = Status.from_string("Cancelled")
S_ERROR = Status.from_string("Error")
S_OK = Status.from_string("Completed")
ALL_STATUS = [
S_INIT,
S_LOCKED,
S_READY,
S_SUB,
S_RUN,
S_DONE,
S_ABICRITICAL,
S_QCRITICAL,
S_UNCONVERGED,
#S_CANCELLED,
S_ERROR,
S_OK,
]
# Color used to plot the network in networkx
color_rgb = np.array((105, 105, 105)) / 255
def __init__(self):
self._in_spectator_mode = False
# Node identifier.
self._node_id = get_newnode_id()
# List of dependencies
self._deps = []
# List of files (products) needed by this node.
self._required_files = []
# Used to push additional info during the execution.
self.history = NodeHistory(maxlen=80)
# Actions performed to fix abicritical events.
self._corrections = NodeCorrections()
# Set to true if the node has been finalized.
self._finalized = False
self._status = self.S_INIT
def __eq__(self, other):
if not isinstance(other, Node): return False
return self.node_id == other.node_id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.node_id)
def __repr__(self):
try:
return "<%s, node_id=%s, workdir=%s>" % (
self.__class__.__name__, self.node_id, self.relworkdir)
except AttributeError:
# this usually happens when workdir has not been initialized
return "<%s, node_id=%s, workdir=None>" % (self.__class__.__name__, self.node_id)
#def __setattr__(self, name, value):
# if self.in_spectator_mode:
# raise RuntimeError("You should not call __setattr__ in spectator_mode")
# return super(Node, self).__setattr__(name,value)
@lazy_property
def color_hex(self):
"""Node color as Hex Triplet https://en.wikipedia.org/wiki/Web_colors#Hex_triplet"""
def clamp(x):
return max(0, min(int(x), 255))
r, g, b = np.trunc(self.color_rgb * 255)
return "#{0:02x}{1:02x}{2:02x}".format(clamp(r), clamp(g), clamp(b))
def isinstance(self, class_or_string):
"""
        Check whether the node is an instance of `class_or_string`.
        Unlike the standard isinstance builtin, this method accepts either a class or a string.
        In the latter case, the string is compared with self.__class__.__name__ (case insensitive).
"""
if class_or_string is None:
return False
import inspect
if inspect.isclass(class_or_string):
return isinstance(self, class_or_string)
else:
return self.__class__.__name__.lower() == class_or_string.lower()
@classmethod
def as_node(cls, obj):
"""
Convert obj into a Node instance.
Return:
obj if obj is a Node instance,
            cast obj to :class:`FileNode` instance if obj is a string.
None if obj is None
"""
if isinstance(obj, cls):
return obj
elif is_string(obj):
# Assume filepath.
return FileNode(obj)
elif obj is None:
return obj
else:
raise TypeError("Don't know how to convert %s to Node instance." % obj)
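    # Usage sketch (the path below is illustrative):
    #   Node.as_node("/some/run/out_DEN")  -> FileNode wrapping that path
    #   Node.as_node(existing_task)        -> existing_task (returned unchanged)
    #   Node.as_node(None)                 -> None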
@property
def name(self):
"""
The name of the node
(only used for facilitating its identification in the user interface).
"""
try:
return self._name
except AttributeError:
if self.is_task:
try:
return self.pos_str
except:
return os.path.basename(self.workdir)
else:
return os.path.basename(self.workdir)
@property
def relworkdir(self):
"""Return a relative version of the workdir"""
if getattr(self, "workdir", None) is None:
return None
try:
return os.path.relpath(self.workdir)
except OSError:
# current working directory may not be defined!
return self.workdir
def set_name(self, name):
"""Set the name of the Node."""
self._name = name
@property
def node_id(self):
"""Node identifier."""
return self._node_id
@check_spectator
def set_node_id(self, node_id):
"""Set the node identifier. Use it carefully!"""
self._node_id = node_id
@property
def finalized(self):
"""True if the `Node` has been finalized."""
return self._finalized
@finalized.setter
def finalized(self, boolean):
self._finalized = boolean
self.history.info("Finalized set to %s" % self._finalized)
@property
def in_spectator_mode(self):
return self._in_spectator_mode
@in_spectator_mode.setter
def in_spectator_mode(self, mode):
self._in_spectator_mode = bool(mode)
#self.history.info("in_spectator_mode set to %s" % mode)
@property
def corrections(self):
"""
        List of dictionaries with information on the actions performed to solve `AbiCritical` Events.
Each dictionary contains the `AbinitEvent` who triggered the correction and
a human-readable message with the description of the operation performed.
"""
return self._corrections
@property
def num_corrections(self):
return len(self.corrections)
def log_correction(self, event, action):
"""
This method should be called once we have fixed the problem associated to this event.
It adds a new entry in the correction history of the node.
Args:
event: :class:`AbinitEvent` that triggered the correction.
            action (str): Human-readable string with info on the action performed to solve the problem.
"""
# TODO: Create CorrectionObject
action = str(action)
self.history.info(action)
self._corrections.append(dict(
event=event.as_dict(),
action=action,
))
@property
def is_file(self):
"""True if this node is a file"""
return isinstance(self, FileNode)
@property
def is_task(self):
"""True if this node is a Task"""
from .tasks import Task
return isinstance(self, Task)
@property
def is_work(self):
"""True if this node is a Work"""
from .works import Work
return isinstance(self, Work)
@property
def is_flow(self):
"""True if this node is a Flow"""
from .flows import Flow
return isinstance(self, Flow)
@property
def deps(self):
"""
List of :class:`Dependency` objects defining the dependencies
of this `Node`. Empty list if this :class:`Node` does not have dependencies.
"""
return self._deps
@check_spectator
def add_deps(self, deps):
"""
Add a list of dependencies to the :class:`Node`.
Args:
deps: List of :class:`Dependency` objects specifying the dependencies of the node.
or dictionary mapping nodes to file extensions e.g. {task: "DEN"}
"""
if isinstance(deps, collections.Mapping):
# Convert dictionary into list of dependencies.
deps = [Dependency(node, exts) for node, exts in deps.items()]
# We want a list
if not isinstance(deps, (list, tuple)):
deps = [deps]
assert all(isinstance(d, Dependency) for d in deps)
# Add the dependencies to the node
self._deps.extend(deps)
if self.is_work:
# The task in the work should inherit the same dependency.
for task in self:
task.add_deps(deps)
# If we have a FileNode as dependency, add self to its children
        # Node.get_parents will use this list if node.is_file.
for dep in (d for d in deps if d.node.is_file):
dep.node.add_filechild(self)
@check_spectator
def remove_deps(self, deps):
"""
Remove a list of dependencies from the :class:`Node`.
Args:
deps: List of :class:`Dependency` objects specifying the dependencies of the node.
"""
if not isinstance(deps, (list, tuple)):
deps = [deps]
assert all(isinstance(d, Dependency) for d in deps)
self._deps = [d for d in self._deps if d not in deps]
if self.is_work:
# remove the same list of dependencies from the task in the work
for task in self:
task.remove_deps(deps)
@property
def deps_status(self):
"""Returns a list with the status of the dependencies."""
if not self.deps:
return [self.S_OK]
return [d.status for d in self.deps]
def depends_on(self, other):
"""True if this node depends on the other node."""
return other in [d.node for d in self.deps]
def get_parents(self):
"""Return the list of nodes in the :class:`Flow` required by this :class:`Node`"""
return [d.node for d in self.deps]
#parents = []
#for work in self.flow:
# if self.depends_on(work): parents.append(work)
# for task in work:
# if self.depends_on(task): parents.append(task)
#return parents
def get_children(self):
"""
Return the list of nodes in the :class:`Flow` that depends on this :class:`Node`
.. note::
This routine assumes the entire flow has been allocated.
"""
# Specialized branch for FileNode.
if self.is_file:
return self.filechildren
# Inspect the entire flow to get children.
children = []
for work in self.flow:
if work.depends_on(self): children.append(work)
for task in work:
if task.depends_on(self): children.append(task)
return children
def str_deps(self):
"""Return the string representation of the dependencies of the node."""
lines = []
app = lines.append
app("Dependencies of node %s:" % str(self))
for i, dep in enumerate(self.deps):
app("%d) %s, status=%s" % (i, dep.info, str(dep.status)))
return "\n".join(lines)
def get_vars_dataframe(self, *varnames):
"""
Return pandas DataFrame with the value of the variables specified in `varnames`.
Can be used for task/works/flow. It's recursive!
.. example:
flow.get_vars_dataframe("ecut", "ngkpt")
work.get_vars_dataframe("acell", "usepawu")
"""
import pandas as pd
if self.is_task:
df = pd.DataFrame([{v: self.input.get(v, None) for v in varnames}], index=[self.name], columns=varnames)
df["class"] = self.__class__.__name__
return df
elif self.is_work:
frames = [task.get_vars_dataframe(*varnames) for task in self]
return pd.concat(frames)
elif self.is_flow:
frames = [work.get_vars_dataframe(*varnames) for work in self]
return pd.concat(frames)
else:
#print("Ignoring node of type: `%s`" % type(self))
return pd.DataFrame(index=[self.name])
def get_graphviz_dirtree(self, engine="automatic", **kwargs):
"""
Generate directory graph in the DOT language. The graph show the files and directories
in the node workdir.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
"""
if engine == "automatic":
engine = "fdp"
return Dirviz(self.workdir).get_cluster_graph(engine=engine, **kwargs)
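    # Usage sketch (requires the graphviz Python package and executables):
    #   graph = node.get_graphviz_dirtree()
    #   graph.view()               # open the rendered directory tree
    #   graph.render("dirtree")    # or write it to disk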
def set_gc(self, gc):
"""
Set the garbage collector.
"""
assert isinstance(gc, GarbageCollector)
self._gc = gc
@property
def gc(self):
"""
Garbage collector. None if garbage collection is deactivated.
Use flow.set_garbage_collector to initialize the object.
"""
try:
return self._gc
except AttributeError:
#if not self.is_flow and self.flow.gc: return self.flow.gc
return None
@property
def event_handlers(self):
"""
The list of handlers registered for this node.
If the node is not a `Flow` and does not have its own list of
`handlers` the handlers registered at the level of the flow are returned.
        This trick allows one to register different handlers at the level of the Task
for testing purposes. By default, we have a common list of handlers for all the nodes in the flow.
This choice facilitates the automatic installation of the handlers when we use callbacks to generate
new Works and Tasks!
"""
if self.is_flow:
return self._event_handlers
try:
return self._event_handlers
except AttributeError:
return self.flow._event_handlers
@check_spectator
def install_event_handlers(self, categories=None, handlers=None):
"""
        Install the `EventHandler`s for this `Node`. If no argument is provided
the default list of handlers is installed.
Args:
categories: List of categories to install e.g. base + can_change_physics
handlers: explicit list of :class:`EventHandler` instances.
This is the most flexible way to install handlers.
.. note::
categories and handlers are mutually exclusive.
"""
if categories is not None and handlers is not None:
raise ValueError("categories and handlers are mutually exclusive!")
from .events import get_event_handler_classes
if categories:
raise NotImplementedError()
handlers = [cls() for cls in get_event_handler_classes(categories=categories)]
else:
handlers = handlers or [cls() for cls in get_event_handler_classes()]
self._event_handlers = handlers
def show_event_handlers(self, stream=sys.stdout, verbose=0):
"""Print to `stream` the event handlers installed for this flow."""
lines = ["List of event handlers installed:"]
for handler in self.event_handlers:
if verbose:
lines.extend(handler.__class__.cls2str().split("\n"))
else:
lines.extend(str(handler).split("\n"))
stream.write("\n".join(lines))
stream.write("\n")
def send_signal(self, signal):
"""
Send signal from this node to all connected receivers unless the node is in spectator mode.
signal -- (hashable) signal value, see `dispatcher` connect for details
Return a list of tuple pairs [(receiver, response), ... ]
or None if the node is in spectator mode.
if any receiver raises an error, the error propagates back
through send, terminating the dispatch loop, so it is quite
        possible that not all receivers are called if one raises an error.
"""
if self.in_spectator_mode: return None
logger.debug("Node %s broadcasts signal %s" % (self, signal))
dispatcher.send(signal=signal, sender=self)
##########################
### Abstract protocol ####
##########################
@property
@abc.abstractmethod
def status(self):
"""The status of the `Node`."""
@abc.abstractmethod
def check_status(self):
"""Check the status of the `Node`."""
class FileNode(Node):
"""
    A Node that consists of a file. The file may not exist yet.
Mainly used to connect :class:`Task` objects to external files produced in previous runs.
"""
color_rgb = np.array((102, 51, 255)) / 255
def __init__(self, filename):
super(FileNode, self).__init__()
self.filepath = os.path.abspath(filename)
# Directories with input|output|temporary data.
self.workdir = os.path.dirname(self.filepath)
self.indir = Directory(self.workdir)
self.outdir = Directory(self.workdir)
self.tmpdir = Directory(self.workdir)
self._filechildren = []
def __repr__(self):
try:
return "<%s, node_id=%s, rpath=%s>" % (
self.__class__.__name__, self.node_id, os.path.relpath(self.filepath))
except AttributeError:
# this usually happens when workdir has not been initialized
return "<%s, node_id=%s, path=%s>" % (self.__class__.__name__, self.node_id, self.filepath)
@lazy_property
def basename(self):
"""Basename of the file."""
return os.path.basename(self.filepath)
@property
def products(self):
return [Product.from_file(self.filepath)]
def opath_from_ext(self, ext):
return self.filepath
@property
def status(self):
return self.S_OK if os.path.exists(self.filepath) else self.S_ERROR
def check_status(self):
return self.status
def get_results(self, **kwargs):
results = super(FileNode, self).get_results(**kwargs)
#results.register_gridfs_files(filepath=self.filepath)
return results
def add_filechild(self, node):
"""Add a node (usually Task) to the children of this FileNode."""
self._filechildren.append(node)
@property
def filechildren(self):
"""List with the children (nodes) of this FileNode."""
return self._filechildren
# This part provides IO capabilities to FileNode with API similar to the one implemented in Task.
# We may need it at runtime to extract information from netcdf files e.g.
# a NscfTask will change the FFT grid to match the one used in the GsTask.
def abiopen(self):
from abipy import abilab
return abilab.abiopen(self.filepath)
def open_gsr(self):
return self._abiopen_abiext("_GSR.nc")
def _abiopen_abiext(self, abiext):
import glob
from abipy import abilab
if not self.filepath.endswith(abiext):
msg = """\n
File type does not match the abinit file extension.
Caller asked for abiext: `%s` whereas filepath: `%s`.
Continuing anyway assuming that the netcdf file provides the API/dims/vars needed by the caller.
""" % (abiext, self.filepath)
logger.warning(msg)
self.history.warning(msg)
            # try to find a file with the expected extension in the same directory
            dirpath = os.path.dirname(self.filepath)
            glob_result = glob.glob(os.path.join(dirpath, "*%s" % abiext))
if len(glob_result): return abilab.abiopen(glob_result[0])
return self.abiopen()
class HistoryRecord:
"""
A `HistoryRecord` instance represents an entry in the :class:`NodeHistory`.
`HistoryRecord` instances are created every time something is logged.
They contain all the information pertinent to the event being logged.
The main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record.
The record also includes information such as when the record was created,
the source line where the logging call was made
.. attribute:: levelno
Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL)
.. attribute:: levelname
Text logging level for the message ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
.. attribute:: pathname
Full pathname of the source file where the logging call was issued (if available)
.. attribute:: filename
Filename portion of pathname
.. attribute:: module
Module (name portion of filename)
.. attribute:: lineno
Source line number where the logging call was issued (if available)
.. attribute:: func_name
Function name
.. attribute:: created
Time when the HistoryRecord was created (time.time() return value)
.. attribute:: asctime
Textual time when the HistoryRecord was created
.. attribute:: message
The result of record.getMessage(), computed just as the record is emitted
"""
def __init__(self, level, pathname, lineno, msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelno = level
self.pathname = pathname
self.msg = msg
self.levelname = "FOOBAR" #getLevelName(level)
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.lineno = lineno
self.func_name = func
self.created = time.time()
self.asctime = time.asctime()
# Remove milliseconds
i = self.asctime.find(".")
if i != -1: self.asctime = self.asctime[:i]
def __repr__(self):
return '<%s, %s, %s, %s,\n"%s">' % (self.__class__.__name__, self.levelno, self.pathname, self.lineno, self.msg)
def __str__(self):
return self.get_message(metadata=False)
def get_message(self, metadata=False, asctime=True):
"""
Return the message after merging any user-supplied arguments with the message.
Args:
metadata: True if function and module name should be added.
asctime: True if time string should be added.
"""
msg = self.msg if is_string(self.msg) else str(self.msg)
if self.args:
try:
msg = msg % self.args
except:
msg += str(self.args)
if asctime: msg = "[" + self.asctime + "] " + msg
# Add metadata
if metadata:
msg += "\nCalled by %s at %s:%s\n" % (self.func_name, self.pathname, self.lineno)
return msg
@pmg_serialize
def as_dict(self):
return {'level': self.levelno, 'pathname': self.pathname, 'lineno': self.lineno, 'msg': self.msg,
'args': self.args, 'exc_info': self.exc_info, 'func': self.func_name}
@classmethod
def from_dict(cls, d):
return cls(level=d['level'], pathname=d['pathname'], lineno=int(d['lineno']), msg=d['msg'], args=d['args'],
exc_info=d['exc_info'], func=d['func'])
class NodeHistory(collections.deque):
"""Logger-like object"""
def __str__(self):
return self.to_string()
def to_string(self, metadata=False):
"""Returns a string with the history. Set metadata to True to have info on function and module."""
return "\n".join(rec.get_message(metadata=metadata) for rec in self)
def info(self, msg, *args, **kwargs):
"""Log 'msg % args' with the info severity level"""
self._log("INFO", msg, args, kwargs)
def warning(self, msg, *args, **kwargs):
"""Log 'msg % args' with the warning severity level"""
self._log("WARNING", msg, args, kwargs)
def critical(self, msg, *args, **kwargs):
"""Log 'msg % args' with the critical severity level"""
self._log("CRITICAL", msg, args, kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None):
"""Low-level logging routine which creates a :class:`HistoryRecord`."""
if exc_info and not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
self.append(HistoryRecord(level, "unknown filename", 0, msg, args, exc_info, func="unknown func"))
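# Usage sketch (illustrative): a node logs through its history, e.g.
#   node.history.info("Finalized set to %s", True)
#   print(node.history.to_string(metadata=True))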
class NodeCorrections(list):
"""Iterable storing the correctios performed by the :class:`EventHandler`"""
#TODO
# Correction should have a human-readable message
# and a list of operatins in JSON format (Modder?) so that
# we can read them and re-apply the corrections to another task if needed.
#def count_event_class(self, event_class):
# """
# Return the number of times the event class has been already fixed.
# """
# #return len([c for c in self if c["event"]["@class"] == str(event_class)])
#def _find(self, event_class)
class GarbageCollector:
"""This object stores information on the """
def __init__(self, exts, policy):
self.exts, self.policy = set(exts), policy
# The code below initializes a counter from a file when the module is imported
# and saves the counter's updated value automatically when the program terminates
# without relying on the application making an explicit call into this module at termination.
_COUNTER = None
_COUNTER_FILE = os.path.join(os.path.expanduser("~"), ".abinit", "abipy", "nodecounter")
def init_counter():
global _COUNTER
# Make dir and file if not present.
if not os.path.exists(os.path.dirname(_COUNTER_FILE)):
os.makedirs(os.path.dirname(_COUNTER_FILE))
if not os.path.exists(_COUNTER_FILE):
with open(_COUNTER_FILE, "wt") as fh:
fh.write("%d\n" % -1)
if _COUNTER is None:
with open(_COUNTER_FILE, "r") as fh:
s = fh.read().strip()
if not s: s = "-1"
_COUNTER = int(s)
def get_newnode_id():
"""
Returns a new node identifier used for :class:`Task`, :class:`Work` and :class:`Flow` objects.
    .. warning::
The id is unique inside the same python process so be careful when
Works and Tasks are constructed at run-time or when threads are used.
"""
init_counter()
global _COUNTER
_COUNTER += 1
return _COUNTER
def save_lastnode_id():
"""Save the id of the last node created."""
init_counter()
with FileLock(_COUNTER_FILE):
with AtomicFile(_COUNTER_FILE, mode="w") as fh:
fh.write("%d\n" % _COUNTER)
# Register function atexit
import atexit
atexit.register(save_lastnode_id)
| mit |
mavlyutovrus/interval_index | python/draw_individ_graphs.py | 1 | 14796 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib
from pylab import *
import numpy
from copy_reg import remove_extension
from heapq import heappush
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
algo2index = []
def load_data(filename):
all_results = {}
data_set = ""
mapper = ""
data = {}
mapper_results = []
by_datasets = {}
for line in open(filename):
if not "\t" in line:
continue
chunks = line.split("\t")
ds, algo = chunks[:2]
results = [float(chunk) for chunk in chunks[2:]]
by_datasets.setdefault(ds, {})
by_datasets[ds].setdefault(algo, []).append(results)
return by_datasets
query_len_results = load_data("../test_results/query_len_individ.txt")
chi_results = load_data("../test_results/checkpoint_interval.txt")
#avg_overlapping_results = load_data("../test_results/avg_overlapping.txt")
#stdev_results = load_data("../test_results/avg_overlapping_stdev.txt")
algos = [name for name in query_len_results.values()[0].keys()]
algos.sort()
#algos += ["avg_results_per_query"]
algo2index = {}
for index in xrange(len(algos)):
algo2index[algos[index]] = index
colors = ["red", "green", "blue", "black", "silver", "aqua", "purple", "orange", "brown"]
#colors = ["#F26C4F", "#F68E55", "#FFF467", "#3BB878", "#00BFF3", "#605CA8", "#A763A8"]
#colors_darker = ["#9E0B0F", "#A0410D", "#ABA000", "#007236", "#0076A3", "#1B1464", "#630460"]
RESPONSE_SIZE_POS = 0
TIME_RESULT_POS = 3
MEM_CONSUMPTION_POS = 1
suffix = ""
file_type = ".png"
def draw_scatter_plot(x_values, algo2results, title, filename, yaxis_name, xaxis_name, y_log=False, x_log=False, left_margin=0.2, xticks_labels = None):
fig = figure(figsize=(8, 6), dpi=80)
grid(b=True, which='major', color='gray', axis="both", linestyle='--', zorder=-1)
font_size = 20
#plt.subplot2grid((1,3), (0,0), colspan=2)
algos = [algo for algo in algo2results.keys()]
algos.sort()
print algos
for algo_index in xrange(len(algos)):
algo = algos[algo_index]
#if algo == "upper":
# continue
line, = plt.plot(x_values, algo2results[algo], lw=3, color=colors[algo_index])
line.set_zorder(1)
if y_log:
plt.yscale('log')
if x_log:
plt.xscale('log')
ylabel = plt.ylabel(yaxis_name)
ylabel.set_fontsize(font_size)
xlabel = plt.xlabel(xaxis_name)
xlabel.set_fontsize(font_size)
for ytick in plt.yticks()[1]:
ytick.set_fontsize(font_size / 1.5)
"""
print x_values
MAX_TICKS = 10
step = 100000
for step in [100000, 200000, 500000, 1000000, 2000000, 5000000, 10000000]:
if MAX_TICKS * step >= max(x_values):
break
xticks2show = range(0, max(x_values) + 1, step)
xticks2show_labels = [value and str(int(value)) or "" for value in xticks2show]
"""
xticks2show = x_values
if not xticks_labels:
xticks2show_labels = [str(value) for value in xticks2show]
else:
xticks2show_labels = xticks_labels
plt.xlim([0, max(x_values)])
max_y = 0
for values in algo2results.values():
print values
max_y = max(max(values), max_y)
plt.ylim([0, max_y * 1.1])
plt.xticks(xticks2show, xticks2show_labels, fontsize=font_size / 1.5)
#legend = plt.legend( (p1[0], p2[0]), ("shuffled", "sorted"), shadow=False, loc=1, fontsize=font_size)
#legend.draw_frame(False)
fig.patch.set_visible(False)
#figtext(.94, 0.4, u"© http://exascale.info", rotation='vertical')
title = plt.title(title)
title.set_fontsize(font_size)
savefig(filename + file_type, transparent="True", pad_inches=0)
plt.show()
#exit()
def draw_legend():
font_size= 20
fig = figure(figsize=(8, 6), dpi=80)
p1 = plt.bar(range(len(algo2index)), range(len(algo2index)), 1.0, color="#7FCA9F")
for algo_index in xrange(len(algos)):
p1[algo_index].set_color(colors[algo_index])
fig = figure(figsize=(12, 6), dpi=80)
desc = [algo for algo in algos]
legend = plt.legend( p1, desc, shadow=False, loc=1, fontsize=font_size)
legend.draw_frame(True)
savefig("../graphs/test_results/legend" + file_type, transparent="True", pad_inches=0)
#draw_legend()
### query_len
def calc_avg_minus_extremes(values):
values.sort()
values = values[1:-1]
import numpy
return float(sum(values)) / len(values)#, numpy.std(values)
#chis
if 1:
ds_name2x = [(float(ds_name.split("/")[-1].split("_")[-1].replace(".txt", "")), ds_name) for ds_name in chi_results.keys()]
algos_results = chi_results.values()[0]
algos = [algo for algo in algos_results.keys()]
algos.sort()
x_values = [int(algo[1:]) for algo in algos]
print x_values
trends = {}
for algo_index in xrange(len(algos)):
algo = algos[algo_index]
all_algo_results = algos_results[algo]
algo_result = calc_avg_minus_extremes([results[-1] for results in all_algo_results])
trends.setdefault("main", []).append((algo_index, algo_result))
algo_result = calc_avg_minus_extremes([results[-2] for results in all_algo_results])
trends.setdefault("middle", []).append((algo_index, algo_result))
algo_result = calc_avg_minus_extremes([results[-3] for results in all_algo_results])
trends.setdefault("lower", []).append((algo_index, algo_result))
#trends.setdefault(algo, []).append((ds_name2x_pos[ds_name], [result[4] for result in results]))
for algo in trends.keys():
trends[algo].sort()
trends[algo] = [value for index, value in trends[algo]]
    draw_scatter_plot(x_values, trends, "Query execution time = f(checkpoint interval)",
"../graphs/test_results/checkpoint_intervals",
"100K requests time, ms", "Checkpoint interval",
y_log=False, x_log=False, left_margin=0.2)
if 0:
ds_name2x = [(float(ds_name.split("/")[-1].split("_")[-1].replace(".txt", "")), ds_name) for ds_name in query_len_results.keys()]
ds_name2x.sort()
x_values = [value for value, _ in ds_name2x]
ds_name2x_pos = {}
for index in xrange(len(ds_name2x)):
value, key = ds_name2x[index]
ds_name2x_pos[key] = index
print algos
algo_index = 3;
trends = {}
for ds_name, algos_results in query_len_results.items():
for algo, results in algos_results.items():
trends.setdefault(algo, []).append((ds_name2x_pos[ds_name], [result[4] for result in results]))
"""
if algo == algos[0]:
trends.setdefault("0lower", []).append((ds_name2x_pos[ds_name], [result[2] for result in results]))
trends.setdefault("0middle", []).append((ds_name2x_pos[ds_name], [result[3] for result in results]))
trends.setdefault("0upper", []).append((ds_name2x_pos[ds_name], [result[4] for result in results]))
if algo == algos[1]:
trends.setdefault("1lower", []).append((ds_name2x_pos[ds_name], [result[2] for result in results]))
trends.setdefault("1middle", []).append((ds_name2x_pos[ds_name], [result[3] for result in results]))
trends.setdefault("1upper", []).append((ds_name2x_pos[ds_name], [result[4] for result in results]))
"""
print "start"
for algo in trends.keys():
trends[algo].sort()
trend = []
for index, values in trends[algo]:
trend += [calc_avg_minus_extremes(values)]
trends[algo] = trend
#trends["avg_results_per_query"] = avg_results_per_query_trend
draw_scatter_plot(x_values, trends, "Query execution time = f(query_len)",
"../graphs/test_results/query_len_individ",
"Avg time per returned interval, ms", "Query length",
y_log=False, x_log=True, left_margin=0.2)
### average overlapping - query time
if 0:
for key in query_len_results.keys():
query_size = float(key.split("/")[-1].split("_")[-1].replace(".txt", ""))
if query_size > 150:
del query_len_results[key]
ds_name2x = [(float(ds_name.split("/")[-1].split("_")[-1].replace(".txt", "")), ds_name) for ds_name in avg_overlapping_results.keys()]
ds_name2x.sort()
x_ticks_labels = [str(int(value * 10) / 10.0) for value, _ in ds_name2x]
for index in xrange(len(x_ticks_labels)):
if index % 2:
x_ticks_labels[index] = ""
x_values = [index for index in xrange(len(ds_name2x))]
ds_name2x_pos = {}
for index in xrange(len(ds_name2x)):
value, key = ds_name2x[index]
ds_name2x_pos[key] = index
trends = {}
for ds_name, algos_results in avg_overlapping_results.items():
for algo, results in algos_results.items():
trends.setdefault(algo, []).append( (ds_name2x_pos[ds_name], [(result[RESPONSE_SIZE_POS], result[TIME_RESULT_POS]) for result in results]))
for algo in trends.keys():
trends[algo].sort()
trend = []
for index, values in trends[algo]:
response_size = values[0][0]
values = [value for _, value in values]
trend += [1000 * calc_avg_minus_extremes(values) / float(response_size)]
trends[algo] = trend
draw_scatter_plot(x_values, trends, "Query time = f(avg_overlapping)",
"../graphs/test_results/avg_overlapping",
"Avg time per returned interval, ms", "Avg. overlapping",
y_log=False, x_log=False, left_margin=0.2, xticks_labels= x_ticks_labels)
### average overlapping stdev - query time
if 0:
ds_name2x = [(float(ds_name.split("/")[-1].split("_")[-1].replace(".txt", "")), ds_name) for ds_name in stdev_results.keys()]
ds_name2x.sort()
x_values = [value for value, _ in ds_name2x]
ds_name2x_pos = {}
for index in xrange(len(ds_name2x)):
value, key = ds_name2x[index]
ds_name2x_pos[key] = index
trends = {}
for ds_name, algos_results in stdev_results.items():
for algo, results in algos_results.items():
trends.setdefault(algo, []).append( (ds_name2x_pos[ds_name], [(result[RESPONSE_SIZE_POS], result[TIME_RESULT_POS]) for result in results]))
for algo in trends.keys():
trends[algo].sort()
trend = []
for index, values in trends[algo]:
print set([rs for rs, _ in values])
response_size = values[0][0]
values = [value for _, value in values]
trend += [1000 * calc_avg_minus_extremes(values) / float(response_size)]
trends[algo] = trend
draw_scatter_plot(x_values, trends, "Query time = f(overlapping stdev)",
"../graphs/test_results/stddev",
"Avg time per returned interval, ms", "Intervals length range / 2",
y_log=False, x_log=False, left_margin=0.2)
#draw_bar_charts(trends, "100K queries time = f(stdev)", "100K queries time, s", "../graphs/test_results/stddev")
### average overlapping - mem consumption
if 0:
for key in query_len_results.keys():
query_size = float(key.split("/")[-1].split("_")[-1].replace(".txt", ""))
#if query_size > 150:
# del query_len_results[key]
ds_name2x = [(float(ds_name.split("/")[-1].split("_")[-1].replace(".txt", "")), ds_name) for ds_name in avg_overlapping_results.keys()]
ds_name2x.sort()
x_ticks_labels = [str(int(value * 10) / 10.0) for value, _ in ds_name2x]
for index in xrange(len(x_ticks_labels)):
if index % 2:
x_ticks_labels[index] = ""
x_values = [index for index in xrange(len(ds_name2x))]
ds_name2x_pos = {}
for index in xrange(len(ds_name2x)):
value, key = ds_name2x[index]
ds_name2x_pos[key] = index
trends = {}
for ds_name, algos_results in avg_overlapping_results.items():
for algo, results in algos_results.items():
trends.setdefault(algo, []).append( (ds_name2x_pos[ds_name], [result[MEM_CONSUMPTION_POS] for result in results]))
for algo in trends.keys():
trends[algo].sort()
trend = []
for index, values in trends[algo]:
trend += [calc_avg_minus_extremes(values)]
trends[algo] = trend
draw_scatter_plot(x_values, trends, "Memory consumption = f(avg_overlapping)",
"../graphs/test_results/mem_consumption_avg_overlapping",
"Memory consumption, kb", "Avg. overlapping",
y_log=False, x_log=False, left_margin=0.2, xticks_labels= x_ticks_labels)
### average overlapping stdev - mem consumption
if 0:
for key in query_len_results.keys():
query_size = float(key.split("/")[-1].split("_")[-1].replace(".txt", ""))
#if query_size > 150:
# del query_len_results[key]
ds_name2x = [(float(ds_name.split("/")[-1].split("_")[-1].replace(".txt", "")), ds_name) for ds_name in stdev_results.keys()]
ds_name2x.sort()
x_ticks_labels = [str(int(value * 10) / 10.0) for value, _ in ds_name2x]
for index in xrange(len(x_ticks_labels)):
if index % 2:
x_ticks_labels[index] = ""
x_values = [index for index in xrange(len(ds_name2x))]
ds_name2x_pos = {}
for index in xrange(len(ds_name2x)):
value, key = ds_name2x[index]
ds_name2x_pos[key] = index
trends = {}
for ds_name, algos_results in stdev_results.items():
for algo, results in algos_results.items():
trends.setdefault(algo, []).append( (ds_name2x_pos[ds_name], [result[MEM_CONSUMPTION_POS] for result in results]))
for algo in trends.keys():
trends[algo].sort()
trend = []
for index, values in trends[algo]:
trend += [calc_avg_minus_extremes(values)]
trends[algo] = trend
draw_scatter_plot(x_values, trends, "Memory consumption = f(avg_overlapping_stdev)",
"../graphs/test_results/mem_consumption_avg_overlapping_stdev",
"Memory consumption, kb", "Avg. overlapping",
y_log=False, x_log=False, left_margin=0.2, xticks_labels= x_ticks_labels)
| apache-2.0 |
ChanderG/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
tomlof/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 25 | 16022 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_moons
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                              err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
def test_make_moons():
X, y = make_moons(3, shuffle=False)
for x, label in zip(X, y):
center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
dist_sqr = ((x - center) ** 2).sum()
assert_almost_equal(dist_sqr, 1.0,
err_msg="Point is not on expected unit circle")
| bsd-3-clause |
yuriyfilonov/text2vec_old | src/connectors.py | 1 | 5199 | import os
import glob
import gzip
import re
import numpy as np
import pandas as pd
class TextFilesConnector:
def __init__(self, inputDirectoryPath):
pathName = inputDirectoryPath + '/*.txt'
self.textFilePaths = glob.glob(pathName)
self.textFilePaths = sorted(self.textFilePaths)
def count(self):
return len(self.textFilePaths)
def iterate(self):
for textFileIndex, textFilePath in enumerate(self.textFilePaths):
with open(textFilePath, 'r') as textFile:
textFileName = os.path.basename(textFilePath).split('.')[0]
text = textFile.read()
yield textFileIndex, textFileName, text
class WikipediaConnector:
def __init__(self, inputDirectoryPath):
pathName = inputDirectoryPath + '/*.txt.gz'
self.dumpPaths = glob.glob(pathName)
self.dumpPaths = self.dumpPaths[:10]
@staticmethod
def filterPage(page):
name, text = page
if ':' in name:
return False
mayReferTo = '{0} may refer to'.format(name).lower()
if text.startswith(mayReferTo):
return False
if text.startswith('#redirect'):
return False
if len(text) < 10:
return False
return True
@staticmethod
def unpackDump(dumpPath):
dumpName = os.path.basename(dumpPath).split('.')[0]
pages = []
try:
with gzip.open(dumpPath, 'rb') as dumpFile:
dumpText = dumpFile.read()
names = [name.strip() for name in re.findall('^\[\[(?P<title>[^\]]+)\]\]\s?$', dumpText, flags=re.M)]
texts = [text.strip() for text in re.split('^\[\[[^\]]+\]\]\s?$', dumpText, flags=re.M) if text]
pages = zip(names, texts)
pages = filter(WikipediaConnector.filterPage, pages)
except:
pass
return dumpName, pages
@staticmethod
def stripWikiMarkup(name, text):
name = re.sub('[^_a-zA-Z0-9\s\(\)]', '', name).strip()
restrictedHeaders = ['see also', 'footnotes', 'references', 'further reading', 'external links', 'books']
headings = [name] + re.findall('^=+\s*([^=]+)\s*=+$', text, flags=re.M)
paragraphs = re.split('^=+\s*[^=]+\s*=+$', text, flags=re.M)
text = ''
for heading, paragraph in zip(headings, paragraphs):
if heading.lower() not in restrictedHeaders:
text += paragraph
return name, text
def count(self):
return len(self.dumpPaths)
def iterate(self):
for dumpIndex, dumpPath in enumerate(self.dumpPaths):
dumpName, pages = WikipediaConnector.unpackDump(dumpPath)
if any(pages):
for name, text in pages:
name, text = WikipediaConnector.stripWikiMarkup(name, text)
yield dumpIndex, name, text
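# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch only, not part of the original module.
# It shows the plain-text layout that WikipediaConnector.unpackDump() assumes:
# every page starts on its own "[[Title]]" line, followed by the page body.
# (Page titles and bodies below are made up for illustration.)
def _wiki_dump_format_sketch():
	dump_text = ("[[Python]]\n"
				 "Python is a programming language.\n"
				 "[[Lisp]]\n"
				 "Lisp is a family of programming languages.\n")
	names = [name.strip() for name in re.findall('^\[\[(?P<title>[^\]]+)\]\]\s?$', dump_text, flags=re.M)]
	texts = [text.strip() for text in re.split('^\[\[[^\]]+\]\]\s?$', dump_text, flags=re.M) if text]
	return zip(names, texts)  # -> [('Python', 'Python is ...'), ('Lisp', 'Lisp is ...')]
# ---------------------------------------------------------------------------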
class ImdbConnector:
def __init__(self, inputDirectoryPath):
self.inputDirectoryPath = inputDirectoryPath
self.trainDir = os.path.join(self.inputDirectoryPath, 'train')
self.trainNegativeDir = os.path.join(self.trainDir, 'neg')
self.trainPositiveDir = os.path.join(self.trainDir, 'pos')
self.trainUnsupervisedDir = os.path.join(self.trainDir, 'unsup')
self.testDir = os.path.join(self.inputDirectoryPath, 'test')
self.testNegativeDir = os.path.join(self.testDir, 'neg')
self.testPositiveDir = os.path.join(self.testDir, 'pos')
dirs = [self.trainNegativeDir, self.trainPositiveDir, self.trainUnsupervisedDir,
self.testNegativeDir, self.testPositiveDir]
self.textFilesPaths = []
for dir in dirs:
pathName = dir + '/*.txt'
self.textFilesPaths += glob.glob(pathName)
self.textFilesPaths = self.textFilesPaths
def count(self):
return len(self.textFilesPaths)
def iterate(self):
for textFileIndex, textFilePath in enumerate(self.textFilesPaths):
with open(textFilePath, 'r') as textFile:
text = textFile.read()
yield textFileIndex, textFilePath, text
class RottenTomatosConnector:
def __init__(self, inputDirectoryPath):
self.inputDirectoryPath = inputDirectoryPath
self.trainFilePath = os.path.join(self.inputDirectoryPath, 'train.tsv')
self.testFilePath = os.path.join(self.inputDirectoryPath, 'test.tsv')
def count(self):
trainSet = pd.read_csv(self.trainFilePath, sep='\t')
testSet = pd.read_csv(self.testFilePath, sep='\t')
dataSet = pd.concat([trainSet, testSet])
dataSet = dataSet.groupby('SentenceId').first()
return len(dataSet)
def iterate(self):
trainSet = pd.read_csv(self.trainFilePath, sep='\t')
testSet = pd.read_csv(self.testFilePath, sep='\t')
dataSet = pd.concat([trainSet, testSet])
dataSet = dataSet.groupby('SentenceId').first()
phraseIndex = 0
for phraseId, phrase in zip(dataSet['PhraseId'], dataSet['Phrase']):
yield phraseIndex, str(phraseId), phrase
phraseIndex += 1 | apache-2.0 |
almarklein/scikit-image | doc/examples/plot_view_as_blocks.py | 2 | 1972 | """
============================
Block views on images/arrays
============================
This example illustrates the use of `view_as_blocks` from
`skimage.util.shape`. Block views can be incredibly useful when one
wants to perform local operations on non-overlapping image patches.
We use `lena` from `skimage.data` and virtually 'slice' it into square
blocks. Then, on each block, we either pool the mean, the max or the
median value of that block. The results are displayed together, along
with the original `lena` image rescaled using spline interpolation of
order 3.
"""
import numpy as np
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from skimage import data
from skimage import color
from skimage.util.shape import view_as_blocks
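# ---------------------------------------------------------------------------
# Editor's note -- a minimal sketch (not part of the original example) of the
# block-view + pooling trick described in the docstring, shown on a tiny 4x4
# array instead of `lena`; it reuses the module-level imports above.
def _block_pool_sketch():
    a = np.arange(16, dtype=float).reshape(4, 4)
    v = view_as_blocks(a, (2, 2))                  # shape (2, 2, 2, 2)
    flat = v.reshape(v.shape[0], v.shape[1], -1)   # one row of 4 values per block
    return flat.mean(axis=2)                       # [[2.5, 4.5], [10.5, 12.5]]
# ---------------------------------------------------------------------------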
# -- get `lena` from skimage.data in grayscale
l = color.rgb2gray(data.lena())
# -- size of blocks
block_shape = (4, 4)
# -- see `lena` as a matrix of blocks (of shape
# `block_shape`)
view = view_as_blocks(l, block_shape)
# -- collapse the last two dimensions in one
flatten_view = view.reshape(view.shape[0], view.shape[1], -1)
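# (editor's note: for the 512x512 grayscale `lena` and 4x4 blocks, `view` has
#  shape (128, 128, 4, 4) and `flatten_view` has shape (128, 128, 16))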
# -- resampling `lena` by taking either the `mean`,
# the `max` or the `median` value of each blocks.
mean_view = np.mean(flatten_view, axis=2)
max_view = np.max(flatten_view, axis=2)
median_view = np.median(flatten_view, axis=2)
# -- display resampled images
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.set_title("Original rescaled with\n spline interpolation (order=3)")
l_resized = ndi.zoom(l, 2, order=3)
ax0.imshow(l_resized, cmap=cm.Greys_r)
ax1.set_title("Block view with\n local mean pooling")
ax1.imshow(mean_view, cmap=cm.Greys_r)
ax2.set_title("Block view with\n local max pooling")
ax2.imshow(max_view, cmap=cm.Greys_r)
ax3.set_title("Block view with\n local median pooling")
ax3.imshow(median_view, cmap=cm.Greys_r)
plt.subplots_adjust(hspace=0.4, wspace=0.4)
plt.show()
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/tests/test_base.py | 16 | 11355 | # Author: Gael Varoquaux
# License: BSD 3 clause
import sys
import numpy as np
import scipy.sparse as sp
import sklearn
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.utils import deprecated
from sklearn.base import TransformerMixin
from sklearn.utils.mocking import MockDataFrame
import pickle
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class ModifyInitParams(BaseEstimator):
"""Deprecated behavior.
Equal parameters but with a type cast.
Doesn't fulfill a is a
"""
def __init__(self, a=np.array([0])):
self.a = a.copy()
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""scikit-learn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_clone_copy_init_params():
# test for deprecation warning when copying or casting an init parameter
est = ModifyInitParams()
message = ("Estimator ModifyInitParams modifies parameters in __init__. "
"This behavior is deprecated as of 0.18 and support "
"for this behavior will be removed in 0.20.")
assert_warns_message(DeprecationWarning, message, clone, est)
def test_clone_sparse_matrices():
sparse_matrix_classes = [
getattr(sp, name)
for name in dir(sp) if name.endswith('_matrix')]
PY26 = sys.version_info[:2] == (2, 6)
if PY26:
# sp.dok_matrix can not be deepcopied in Python 2.6
sparse_matrix_classes.remove(sp.dok_matrix)
for cls in sparse_matrix_classes:
sparse_matrix = cls(np.eye(5))
clf = MyEstimator(empty=sparse_matrix)
clf_cloned = clone(clf)
assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline(
[('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
def test_clone_pandas_dataframe():
class DummyEstimator(BaseEstimator, TransformerMixin):
"""This is a dummy class for generating numerical features
This feature extractor extracts numerical features from pandas data
frame.
Parameters
----------
df: pandas data frame
The pandas data frame parameter.
Notes
-----
"""
def __init__(self, df=None, scalar_param=1):
self.df = df
self.scalar_param = scalar_param
def fit(self, X, y=None):
pass
def transform(self, X, y=None):
pass
# build and clone estimator
d = np.arange(10)
df = MockDataFrame(d)
e = DummyEstimator(df, scalar_param=1)
cloned_e = clone(e)
# the test
assert_true((e.df == cloned_e.df).values.all())
assert_equal(e.scalar_param, cloned_e.scalar_param)
class TreeNoVersion(DecisionTreeClassifier):
def __getstate__(self):
return self.__dict__
class TreeBadVersion(DecisionTreeClassifier):
def __getstate__(self):
return dict(self.__dict__.items(), _sklearn_version="something")
def test_pickle_version_warning():
# check that warnings are raised when unpickling in a different version
# first, check no warning when in the same version:
iris = datasets.load_iris()
tree = DecisionTreeClassifier().fit(iris.data, iris.target)
tree_pickle = pickle.dumps(tree)
assert_true(b"version" in tree_pickle)
assert_no_warnings(pickle.loads, tree_pickle)
# check that warning is raised on different version
tree = TreeBadVersion().fit(iris.data, iris.target)
tree_pickle_other = pickle.dumps(tree)
message = ("Trying to unpickle estimator TreeBadVersion from "
"version {0} when using version {1}. This might lead to "
"breaking code or invalid results. "
"Use at your own risk.".format("something",
sklearn.__version__))
assert_warns_message(UserWarning, message, pickle.loads, tree_pickle_other)
# check that not including any version also works:
# TreeNoVersion has no getstate, like pre-0.18
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
assert_false(b"version" in tree_pickle_noversion)
message = message.replace("something", "pre-0.18")
message = message.replace("TreeBadVersion", "TreeNoVersion")
# check we got the warning about using pre-0.18 pickle
assert_warns_message(UserWarning, message, pickle.loads,
tree_pickle_noversion)
# check that no warning is raised for external estimators
TreeNoVersion.__module__ = "notsklearn"
assert_no_warnings(pickle.loads, tree_pickle_noversion)
| bsd-3-clause |
hydroffice/hyo_soundspeed | hyo2/soundspeed/db/plot.py | 1 | 12018 | import os
import numpy as np
# noinspection PyUnresolvedReferences
from PySide2 import QtWidgets
from matplotlib import rc_context
import cartopy.crs as ccrs
from cartopy.feature import NaturalEarthFeature
import matplotlib.pyplot as plt
import logging
logger = logging.getLogger(__name__)
class PlotDb:
"""Class that plots sound speed db data"""
font_size = 6
rc_context = {
'font.family': 'sans-serif',
'font.sans-serif': ['Tahoma', 'Bitstream Vera Sans', 'Lucida Grande', 'Verdana'],
'font.size': font_size,
'figure.titlesize': font_size + 1,
'axes.labelsize': font_size,
'legend.fontsize': font_size,
'xtick.labelsize': font_size - 1,
'ytick.labelsize': font_size - 1,
'axes.linewidth': 0.5,
'axes.xmargin': 0.01,
'axes.ymargin': 0.01,
'lines.linewidth': 1.0,
}
def __init__(self, db):
self.db = db
@classmethod
def raise_window(cls):
cfm = plt.get_current_fig_manager()
cfm.window.activateWindow()
cfm.window.raise_()
@classmethod
def plots_folder(cls, output_folder):
folder = os.path.join(output_folder, "plots")
if not os.path.exists(folder):
os.makedirs(folder)
return folder
def map_profiles(self, pks=None, output_folder=None, save_fig=False, show_plot=False):
"""plot all the ssp in the database"""
with rc_context(self.rc_context):
if not save_fig:
plt.ion()
rows = self.db.list_profiles()
if rows is None:
raise RuntimeError("Unable to retrieve ssp view rows > Empty database?")
if len(rows) == 0:
raise RuntimeError("Unable to retrieve ssp view rows > Empty database?")
# prepare the data
ssp_x = list()
ssp_y = list()
ssp_label = list()
for row in rows:
if pks is not None: # only if a pk-based filter was passed
if row[0] in pks:
ssp_x.append(row[2].x)
ssp_y.append(row[2].y)
ssp_label.append(row[0])
else:
ssp_x.append(row[2].x)
ssp_y.append(row[2].y)
ssp_label.append(row[0])
# make the world map
plt.close("Profiles Map")
_ = plt.figure("Profiles Map")
ax = plt.subplot(111, projection=ccrs.PlateCarree())
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
plt.ioff()
# noinspection PyUnresolvedReferences
ax.scatter(ssp_x, ssp_y, marker='o', s=14, color='r', zorder=4)
# noinspection PyUnresolvedReferences
ax.scatter(ssp_x, ssp_y, marker='.', s=1, color='k', zorder=4)
delta = 5.0
y_min = min(ssp_y)
if (y_min - delta) < -90.0:
y_min = -90.0
else:
y_min -= delta
y_max = max(ssp_y)
if (y_max + delta) > 90.0:
y_max = 90.0
else:
y_max += delta
x_min = min(ssp_x)
if (x_min - delta) < -180.0:
x_min = -180.0
else:
x_min = x_min - delta
x_max = max(ssp_x)
if (x_max + delta) > 180.0:
x_max = 180.0
else:
x_max += delta
# logger.debug("%s %s, %s %s" % (y_min, y_max, x_min, x_max))
# noinspection PyUnresolvedReferences
ax.set_extent([x_min, x_max, y_min, y_max], crs=ccrs.PlateCarree())
scale = '110m'
# if (x_max - x_min) < 30 and (y_max - y_min) < 30:
# scale = '50m'
ocean = NaturalEarthFeature('physical', 'ocean', scale, edgecolor='#777777',
facecolor="#5da1c9", zorder=2)
# noinspection PyUnresolvedReferences
ax.add_feature(ocean)
# noinspection PyUnresolvedReferences
ax.gridlines(color='#909090', linestyle='--', zorder=3)
if save_fig and (output_folder is not None):
plt.savefig(os.path.join(self.plots_folder(output_folder), 'ssp_map.png'),
bbox_inches='tight')
elif show_plot:
plt.show()
return True
@staticmethod
def _set_inset_color(x, color):
for m in x:
for t in x[m][1]:
t.set_color(color)
class AvgSsp:
def __init__(self):
            # create and populate the lists used in the calculations
self.limits = list() # bin limits
self.depths = list() # avg depth for each bin
self.bins = list() # a list of list with all the value within a bin
# populating
for i, z in enumerate(range(10, 781, 10)):
self.limits.append(z)
# self.depths.append(z + 5.)
self.bins.append(list())
# output lists
self.min_2std = list()
self.max_2std = list()
self.mean = list()
def add_samples(self, depths, values):
for i, d in enumerate(depths):
for j, lim in enumerate(self.limits):
if d < lim:
self.bins[j].append(values[i])
break
def calc_avg(self):
for i, i_bin in enumerate(self.bins):
# to avoid unstable statistics
if len(i_bin) < 3:
continue
if i == 0:
self.depths.append(0.)
elif i == (len(self.bins) - 1):
self.depths.append(780.)
else:
self.depths.append(self.limits[i] - 5.)
avg = np.mean(i_bin)
std = np.std(i_bin)
self.mean.append(avg)
self.min_2std.append(avg - 2 * std)
self.max_2std.append(avg + 2 * std)
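    # (editor's note, added for clarity: AvgSsp bins the samples of all added
    #  profiles into 10-m depth bins between 0 and 780 m; calc_avg() then fills
    #  `mean` with the per-bin average and `min_2std`/`max_2std` with
    #  mean -/+ 2*std, skipping bins with fewer than 3 samples. aggregate_plot()
    #  below feeds it one profile at a time.)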
def aggregate_plot(self, dates, output_folder, save_fig=False):
"""aggregate plot with all the SSPs between the passed dates"""
if not save_fig:
plt.ion()
ts_list = self.db.list_profiles()
if ts_list is None:
raise RuntimeError("Unable to retrieve the day list > Empty database?")
if len(ts_list) == 0:
raise RuntimeError("Unable to retrieve the day list > Empty database?")
# start a new figure
plt.close("Aggregate Plot")
fig, ax = plt.subplots(num="Aggregate Plot")
plt.title("Aggregate SSP plot [from: %s to: %s]" % (dates[0], dates[1]))
ax.invert_yaxis()
ax.set_xlim(1440, 1580)
ax.set_ylim(780, 0)
plt.xlabel('Sound Speed [m/s]', fontsize=10)
plt.ylabel('Depth [m]', fontsize=10)
ax.grid(linewidth=0.8, color=(0.3, 0.3, 0.3))
avg_ssp = PlotDb.AvgSsp()
ssp_count = 0
for ts_pk in ts_list:
tmp_date = ts_pk[1].date()
if (tmp_date < dates[0]) or (tmp_date > dates[1]):
continue
ssp_count += 1
# print(ts_pk[1], ts_pk[0])
tmp_ssp = self.db.profile_by_pk(ts_pk[0])
# print(tmp_ssp)
ax.plot(tmp_ssp.cur.proc.speed[tmp_ssp.cur.proc_valid], tmp_ssp.cur.proc.depth[tmp_ssp.cur.proc_valid], '.',
color=(0.85, 0.85, 0.85), markersize=2
# label='%s [%04d] ' % (ts_pk[0].time(), ts_pk[1])
)
avg_ssp.add_samples(tmp_ssp.cur.proc.depth[tmp_ssp.cur.proc_valid],
tmp_ssp.cur.proc.speed[tmp_ssp.cur.proc_valid])
avg_ssp.calc_avg()
ax.plot(avg_ssp.mean, avg_ssp.depths, '-b', linewidth=2)
ax.plot(avg_ssp.min_2std, avg_ssp.depths, '--b', linewidth=1)
ax.plot(avg_ssp.max_2std, avg_ssp.depths, '--b', linewidth=1)
# fill between std-curves
# ax.fill_betweenx(avg_ssp.depths, avg_ssp.min_2std, avg_ssp.max_2std, color='b', alpha='0.1')
if save_fig:
plt.savefig(os.path.join(self.plots_folder(output_folder), 'aggregate_%s_%s.png' % (dates[0], dates[1])),
bbox_inches='tight')
else:
plt.show()
# if not save_fig:
# plt.show() # issue: QCoreApplication::exec: The event loop is already running
logger.debug("plotted SSPs: %d" % ssp_count)
return True
def daily_plots(self, project_name, output_folder, save_fig=False):
"""plot all the SSPs by day"""
if not save_fig:
plt.ion()
rows = self.db.list_profiles()
if rows is None:
logger.warning("Unable to retrieve ssp view rows > Empty database?")
return False
if len(rows) == 0:
logger.warning("Unable to retrieve ssp view rows > Empty database?")
return False
# retrieve the timestamps
ts_list = self.db.timestamp_list()
# print(ts_list)
# find the days
date_list = list()
for ts in ts_list:
date = ts[0].date()
if date not in date_list:
date_list.append(date)
# print(date_list)
# create the required figures and prepare the dict to count the plots
date_plots = dict()
for date in date_list:
date_plots[date] = 0
fig, ax = plt.subplots(num=date_list.index(date))
ax.invert_yaxis()
logger.info("create: %s" % date_list.index(date))
# plot each profile
for row in rows:
row_date = row[1].date() # 1 is the cast_datetime
logger.info("plot: %s" % date_list.index(row_date))
date_plots[row_date] += 1
fig = plt.figure(date_list.index(row_date))
row_ssp = self.db.profile_by_pk(row[0])
fig.get_axes()[0].plot(row_ssp.cur.proc.speed[row_ssp.cur.proc_valid],
row_ssp.cur.proc.depth[row_ssp.cur.proc_valid],
label='%s [%04d]' % (row[1].time(), row[0]))
# print(date_plots)
# finishing up the plots
for date in date_list:
fig = plt.figure(date_list.index(date))
plt.title("Day #%s: %s (profiles: %s)" % (date_list.index(date) + 1, date, date_plots[date]))
fig.get_axes()[0].set_xlim(1440, 1580)
fig.get_axes()[0].set_ylim(780, 0)
plt.xlabel('Sound Speed [m/s]', fontsize=10)
plt.ylabel('Depth [m]', fontsize=10)
plt.grid()
# Now add the legend with some customizations.
legend = fig.get_axes()[0].legend(loc='lower right', shadow=True)
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('0.90')
# Set the fontsize
for label in legend.get_texts():
label.set_fontsize('large')
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
# end
for date in date_list:
fig = plt.figure(date_list.index(date))
if save_fig:
fig.savefig(os.path.join(self.plots_folder(output_folder),
'%s.day_%2d.png' % (project_name, date_list.index(date) + 1)),
bbox_inches='tight')
else:
fig.show()
# if not save_fig:
# plt.show() # issue: QCoreApplication::exec: The event loop is already running
# for date in date_list:
# plt.close(plt.figure(date_list.index(date)))
return True
| lgpl-2.1 |
aferrugento/SemLDA | semcor_classifiers.py | 1 | 8924 | import pickle
from sklearn import datasets
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from nltk.corpus import wordnet as wn
import sys
# a dummy synset is created whenever a word is not in SemCor
def main():
probs = pickle.load(open("/home/adriana/Dropbox/mine/Tese/preprocessing/data_output/classifier/words_synsets.p"))
probs_l = pickle.load(open("/home/adriana/Dropbox/mine/Tese/preprocessing/data_output/classifier/lemmas_synsets.p"))
words = pickle.load(open("/home/adriana/Dropbox/mine/Tese/preprocessing/data_output/classifier/semcor_onix_wordnetlemma_vocab.p"))
f = open("/home/adriana/Dropbox/mine/Tese/preprocessing/data_output/classifier/lda_semcor/final.gamma")
m = f.read()
f.close()
h = open("/home/adriana/Dropbox/mine/Tese/preprocessing/data_output/classifier/semcor_onix_wordnetlemma_freq.txt")
g = h.read()
h.close()
#resultados do lda
data = {}
#palavras do corpus
labels = {}
classifiers = {}
m = m.strip()
m = m.split("\n")
docs = []
for i in range(len(m)):
m[i] = m[i].strip()
m[i] = m[i].split(" ")
aux = []
for j in range(len(m[i])):
aux.append(float(m[i][j]))
docs.append(aux)
g = g.strip()
g = g.split("\n")
#inver_probs = revert_dicio(probs)
for i in range(len(g)):
g[i] = g[i].strip()
g[i] = g[i].split(" ")
for j in range(1,len(g[i])):
word = words[int(g[i][j].split(":")[0])]
#if word == 'else' or word == 'ups' or word == 'pas' or word == 'michael' or word == 'francis' or word == 'morgan' or word == 'wallace' or word == 'refresh' or word == 'samuel' or word == 'christopher' or word == 'rhode' or word == 'warwick' or word == 'multi' or word == 'edwin' or word == 'cherry' or word == 'stuart' or word == 'stag' or word == 'marcus' or word == 'francisco' or word == 'salyer' or word == 'perplex' or word == 'pel' or word == 'juan' or word == 'edward' or word == 'richard' or word == 'i.e' or word == 'gene' or word == 'crosby' or word == 'sutherland' or word == 'garry' :
# continue
try:
synsets = probs[word]
except Exception, e:
try:
synsets = probs_l[word]
except Exception, e:
continue
for synset in synsets:
if data.has_key(synset):
aux1 = data[synset]
aux2 = labels[synset]
aux1.append(docs[i])
aux2.append(int(g[i][j].split(":")[0]))
data[synset] = aux1
labels[synset] = aux2
else:
data[synset] = [docs[i]]
labels[synset] = [int(g[i][j].split(":")[0])]
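	# (editor's note, added for clarity: the loop below fits, for every synset
	#  seen in SemCor, one LogisticRegression that predicts which vocabulary
	#  word realises the synset, using the per-document LDA topic mixtures
	#  (rows of final.gamma) as features and the observed word ids as labels.)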
for synset in data:
model = LogisticRegression()
try:
model.fit(data[synset], labels[synset])
except ValueError:
continue
classifiers[synset] = model
print len(classifiers), len(data)
pickle.dump(classifiers, open("/home/adriana/Dropbox/mine/Tese/preprocessing/data_output/classifier/synset_classifiers.p", "w"))
def check_probs(word):
probs = pickle.load(open('prob_dictio_pos2.p'))
not_here = 0
for i in probs.keys():
for k in probs.get(i).keys():
if k == word:
not_here = 1
break
if not_here == 1:
break
return not_here
def classify_data(filename):
probs = pickle.load(open('prob_dictio_pos2.p'))
classifiers = pickle.load(open("synset_classifiers.p"))
words = pickle.load(open(filename + "_vocab.p"))
f = open("infer-gamma.dat")
m = f.read()
f.close()
h = open(filename + "_freq.txt")
g = h.read()
h.close()
g = g.strip()
g = g.split("\n")
m = m.strip()
m = m.split("\n")
docs = []
for i in range(len(m)):
m[i] = m[i].strip()
m[i] = m[i].split(" ")
aux = []
for j in range(len(m[i])):
aux.append(float(m[i][j]))
docs.append(aux)
aux_probs = revert_dicio(probs)
p = open(filename + "_newformat.txt", 'w')
#p.write(str(len(g))+"\n")
synset_number = 0
synset_dic = {}
synset_file = open(filename + "_synsetVoc.txt","w")
imag_synset_number = -1
imag_synset = {}
to_write = []
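	# (editor's note, added for clarity: each line written below has the form
	#  "<n_tokens> <wordId>:<freq>:<nSynsets>[<synsetIdx>:<prob> ...] ...";
	#  the first line of the output file holds the size of the synset
	#  vocabulary, and a word with no known synsets gets a fresh imaginary
	#  synset with probability 1.)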
for i in range(len(g)):
aux = ""
print "DOC " + str(i)
g[i] = g[i].strip()
g[i] = g[i].split(" ")
aux = g[i][0] + " "
#p.write(g[i][0] + " ")
for j in range(1, len(g[i])):
word = words.get(int(g[i][j].split(":")[0]))
#synsets = wn.synsets(word.split("_")[0], penn_to_wn(word.split("_")[1]))
#not_here = check_probs(word)
synsets = aux_probs.get(word)
if synsets == None:
aux += g[i][j] + ":1[" + str(synset_number) + ":" + str(1)+ "] "
#p.write(g[i][j] + ":1[" + str(synset_number) + ":" + str(1)+ "] ")
imag_synset[imag_synset_number] = word
synset_dic[imag_synset_number] = synset_number
#synset_file.write(str(imag_synset_number) + "\n")
synset_number += 1
imag_synset_number = imag_synset_number - 1
continue
aux += g[i][j] +':'+ str(len(synsets)) + '['
#p.write(g[i][j] +':'+ str(len(synsets)) + '[')
count = 0
for k in synsets.keys():
if classifiers.has_key(int(k)):
probas = classifiers[int(k)].predict_proba(docs[i])
classes = classifiers[int(k)].classes_
ids = -1
for c in range(len(classes)):
if int(classes[c]) == int(g[i][j].split(":")[0]):
ids = c
break
#print probas
if ids == -1 and count == len(synsets) - 1:
aux2 = 0
if synset_dic.has_key(int(k)):
aux2 = synset_dic[int(k)]
else:
synset_dic[int(k)] = synset_number
aux2 = synset_number
synset_number += 1
aux += str(aux2)+":"+ str(1) + '] '
count += 1
#p.write(str(synset_number)+":"+ str(1) + '] ')
#synset_file.write(str(int(k)) + "\n")
elif ids == -1 and count != len(synsets) - 1:
aux2 = 0
if synset_dic.has_key(int(k)):
aux2 = synset_dic[int(k)]
else:
synset_dic[int(k)] = synset_number
aux2 = synset_number
synset_number += 1
aux += str(aux2)+":"+ str(1) + ' '
count += 1
#p.write(str(synset_number)+":"+ str(1) + ' ')
#synset_file.write(str(int(k)) + "\n")
elif ids != -1 and count == len(synsets) - 1:
aux2 = 0
if synset_dic.has_key(int(k)):
aux2 = synset_dic[int(k)]
else:
synset_dic[int(k)] = synset_number
aux2 = synset_number
synset_number += 1
aux += str(aux2)+":"+ str(probas[0][c]) + '] '
count += 1
#p.write(str(synset_number)+":"+ str(probas[c]) + '] ')
#synset_file.write(str(int(k)) + "\n")
elif ids != -1 and count != len(synsets) - 1:
aux2 = 0
if synset_dic.has_key(int(k)):
aux2 = synset_dic[int(k)]
else:
synset_dic[int(k)] = synset_number
aux2 = synset_number
synset_number += 1
aux += str(aux2)+":"+ str(probas[0][c]) + ' '
count += 1
#p.write(str(synset_number)+":"+ str(probas[0][c]) + ' ')
#synset_file.write(str(int(k)) + "\n")
else:
if count == len(synsets) - 1:
aux2 = 0
if synset_dic.has_key(int(k)):
aux2 = synset_dic[int(k)]
else:
synset_dic[int(k)] = synset_number
aux2 = synset_number
synset_number += 1
aux += str(aux2)+":"+ str(1) + '] '
count += 1
#aux += str(aux2)+":"+ str(1/len(synsets))) + '] '
#p.write(str(synset_number)+":"+ str(1) + '] ')
#synset_file.write(str(int(k)) + "\n")
else:
aux2 = 0
if synset_dic.has_key(int(k)):
aux2 = synset_dic[int(k)]
else:
synset_dic[int(k)] = synset_number
aux2 = synset_number
synset_number += 1
aux += str(aux2)+":"+ str(1) + ' '
count += 1
#aux += str(aux2)+":"+ str(1/len(synsets))) + ' '
#p.write(str(synset_number)+":"+ str(1) + ' ')
#synset_file.write(str(synsets[k].offset) + "\n")
#p.write("\n")
to_write.append(aux)
ne = revert_dicio2(synset_dic)
for i in range(len(ne)):
synset_file.write(str(ne.get(i))+'\n')
synset_file.close()
p.write(str(len(ne))+"\n")
for i in range(len(to_write)):
p.write(to_write[i] + "\n")
p.close()
pickle.dump(imag_synset, open(filename + "_imag.txt", "w"))
def is_noun(tag):
return tag in ['NN', 'NNS', 'NNP', 'NNPS']
def is_verb(tag):
return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
def is_adverb(tag):
return tag in ['RB', 'RBR', 'RBS']
def is_adjective(tag):
return tag in ['JJ', 'JJR', 'JJS']
def penn_to_wn(tag):
if is_adjective(tag):
return wn.ADJ
elif is_noun(tag):
return wn.NOUN
elif is_adverb(tag):
return wn.ADV
elif is_verb(tag):
return wn.VERB
return None
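# (editor's note) e.g. penn_to_wn('NNS') -> wn.NOUN, penn_to_wn('VBD') -> wn.VERB,
# penn_to_wn('JJR') -> wn.ADJ, penn_to_wn('RB') -> wn.ADV, penn_to_wn('DT') -> None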
def revert_dicio2(words_ids):
new_dictio = {}
for key in words_ids:
new_dictio[words_ids[key]] = key
return new_dictio
def revert_dicio(words_ids):
new_dictio = {}
for key in words_ids:
for words in words_ids[key]:
if new_dictio.has_key(words):
aux = new_dictio[words]
aux[key] = words_ids[key][words]
new_dictio[words] = aux
else:
aux = {}
aux[key] = words_ids[key][words]
new_dictio[words] = aux
return new_dictio
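# (editor's note) e.g. revert_dicio({1: {'bank': 0.7}, 2: {'bank': 0.3}})
# returns {'bank': {1: 0.7, 2: 0.3}}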
if __name__ == "__main__":
#main()
classify_data(sys.argv[1])
| lgpl-2.1 |
huzq/scikit-learn | sklearn/ensemble/_hist_gradient_boosting/tests/test_loss.py | 11 | 14313 | import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy.optimize import newton
from scipy.special import logit
from sklearn.utils import assert_all_finite
from sklearn.utils.fixes import sp_version, parse_version
import pytest
from sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES
from sklearn.ensemble._hist_gradient_boosting.common import Y_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.utils._testing import skip_if_32bit
def get_derivatives_helper(loss):
"""Return get_gradients() and get_hessians() functions for a given loss.
"""
def get_gradients(y_true, raw_predictions):
# create gradients and hessians array, update inplace, and return
gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
loss.update_gradients_and_hessians(gradients, hessians, y_true,
raw_predictions, None)
return gradients
def get_hessians(y_true, raw_predictions):
# create gradients and hessians array, update inplace, and return
gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
loss.update_gradients_and_hessians(gradients, hessians, y_true,
raw_predictions, None)
if loss.__class__.__name__ == 'LeastSquares':
# hessians aren't updated because they're constant:
# the value is 1 (and not 2) because the loss is actually an half
# least squares loss.
hessians = np.full_like(raw_predictions, fill_value=1)
elif loss.__class__.__name__ == 'LeastAbsoluteDeviation':
# hessians aren't updated because they're constant
hessians = np.full_like(raw_predictions, fill_value=0)
return hessians
return get_gradients, get_hessians
@pytest.mark.parametrize('loss, x0, y_true', [
('least_squares', -2., 42),
('least_squares', 117., 1.05),
('least_squares', 0., 0.),
# The argmin of binary_crossentropy for y_true=0 and y_true=1 is resp. -inf
# and +inf due to logit, cf. "complete separation". Therefore, we use
# 0 < y_true < 1.
('binary_crossentropy', 0.3, 0.1),
('binary_crossentropy', -12, 0.2),
('binary_crossentropy', 30, 0.9),
('poisson', 12., 1.),
('poisson', 0., 2.),
('poisson', -22., 10.),
])
@pytest.mark.skipif(sp_version == parse_version('1.2.0'),
reason='bug in scipy 1.2.0, see scipy issue #9608')
@skip_if_32bit
def test_derivatives(loss, x0, y_true):
# Check that gradients are zero when the loss is minimized on a single
# value/sample using Halley's method with the first and second order
# derivatives computed by the Loss instance.
# Note that methods of Loss instances operate on arrays while the newton
# root finder expects a scalar or a one-element array for this purpose.
loss = _LOSSES[loss](sample_weight=None)
y_true = np.array([y_true], dtype=Y_DTYPE)
x0 = np.array([x0], dtype=Y_DTYPE).reshape(1, 1)
get_gradients, get_hessians = get_derivatives_helper(loss)
def func(x: np.ndarray) -> np.ndarray:
if isinstance(loss, _LOSSES['binary_crossentropy']):
# Subtract a constant term such that the binary cross entropy
# has its minimum at zero, which is needed for the newton method.
actual_min = loss.pointwise_loss(y_true, logit(y_true))
return loss.pointwise_loss(y_true, x) - actual_min
else:
return loss.pointwise_loss(y_true, x)
def fprime(x: np.ndarray) -> np.ndarray:
return get_gradients(y_true, x)
def fprime2(x: np.ndarray) -> np.ndarray:
return get_hessians(y_true, x)
optimum = newton(func, x0=x0, fprime=fprime, fprime2=fprime2,
maxiter=70, tol=2e-8)
# Need to ravel arrays because assert_allclose requires matching dimensions
y_true = y_true.ravel()
optimum = optimum.ravel()
assert_allclose(loss.inverse_link_function(optimum), y_true)
assert_allclose(func(optimum), 0, atol=1e-14)
assert_allclose(get_gradients(y_true, optimum), 0, atol=1e-7)
@pytest.mark.parametrize('loss, n_classes, prediction_dim', [
('least_squares', 0, 1),
('least_absolute_deviation', 0, 1),
('binary_crossentropy', 2, 1),
('categorical_crossentropy', 3, 3),
('poisson', 0, 1),
])
@pytest.mark.skipif(Y_DTYPE != np.float64,
reason='Need 64 bits float precision for numerical checks')
def test_numerical_gradients(loss, n_classes, prediction_dim, seed=0):
# Make sure gradients and hessians computed in the loss are correct, by
# comparing with their approximations computed with finite central
# differences.
# See https://en.wikipedia.org/wiki/Finite_difference.
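    # (editor's note, added for clarity: the approximations used below are the
    #  central differences
    #      f'(x)  ~ (f(x + eps/2) - f(x - eps/2)) / eps
    #      f''(x) ~ (f(x + eps) + f(x - eps) - 2*f(x)) / eps**2
    #  applied to the raw predictions of the first tree/class only.)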
rng = np.random.RandomState(seed)
n_samples = 100
if loss in ('least_squares', 'least_absolute_deviation'):
y_true = rng.normal(size=n_samples).astype(Y_DTYPE)
    elif loss == 'poisson':
y_true = rng.poisson(size=n_samples).astype(Y_DTYPE)
else:
y_true = rng.randint(0, n_classes, size=n_samples).astype(Y_DTYPE)
raw_predictions = rng.normal(
size=(prediction_dim, n_samples)
).astype(Y_DTYPE)
loss = _LOSSES[loss](sample_weight=None)
get_gradients, get_hessians = get_derivatives_helper(loss)
# only take gradients and hessians of first tree / class.
gradients = get_gradients(y_true, raw_predictions)[0, :].ravel()
hessians = get_hessians(y_true, raw_predictions)[0, :].ravel()
# Approximate gradients
# For multiclass loss, we should only change the predictions of one tree
# (here the first), hence the use of offset[0, :] += eps
# As a softmax is computed, offsetting the whole array by a constant would
# have no effect on the probabilities, and thus on the loss
eps = 1e-9
offset = np.zeros_like(raw_predictions)
offset[0, :] = eps
f_plus_eps = loss.pointwise_loss(y_true, raw_predictions + offset / 2)
f_minus_eps = loss.pointwise_loss(y_true, raw_predictions - offset / 2)
numerical_gradients = (f_plus_eps - f_minus_eps) / eps
# Approximate hessians
eps = 1e-4 # need big enough eps as we divide by its square
offset[0, :] = eps
f_plus_eps = loss.pointwise_loss(y_true, raw_predictions + offset)
f_minus_eps = loss.pointwise_loss(y_true, raw_predictions - offset)
f = loss.pointwise_loss(y_true, raw_predictions)
numerical_hessians = (f_plus_eps + f_minus_eps - 2 * f) / eps**2
assert_allclose(numerical_gradients, gradients, rtol=1e-4, atol=1e-7)
assert_allclose(numerical_hessians, hessians, rtol=1e-4, atol=1e-7)
def test_baseline_least_squares():
rng = np.random.RandomState(0)
loss = _LOSSES['least_squares'](sample_weight=None)
y_train = rng.normal(size=100)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert baseline_prediction.shape == tuple() # scalar
assert baseline_prediction.dtype == y_train.dtype
# Make sure baseline prediction is the mean of all targets
assert_almost_equal(baseline_prediction, y_train.mean())
assert np.allclose(loss.inverse_link_function(baseline_prediction),
baseline_prediction)
def test_baseline_least_absolute_deviation():
rng = np.random.RandomState(0)
loss = _LOSSES['least_absolute_deviation'](sample_weight=None)
y_train = rng.normal(size=100)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert baseline_prediction.shape == tuple() # scalar
assert baseline_prediction.dtype == y_train.dtype
# Make sure baseline prediction is the median of all targets
assert np.allclose(loss.inverse_link_function(baseline_prediction),
baseline_prediction)
assert baseline_prediction == pytest.approx(np.median(y_train))
def test_baseline_poisson():
rng = np.random.RandomState(0)
loss = _LOSSES['poisson'](sample_weight=None)
y_train = rng.poisson(size=100).astype(np.float64)
# Sanity check, make sure at least one sample is non-zero so we don't take
# log(0)
assert y_train.sum() > 0
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert np.isscalar(baseline_prediction)
assert baseline_prediction.dtype == y_train.dtype
assert_all_finite(baseline_prediction)
# Make sure baseline prediction produces the log of the mean of all targets
assert_almost_equal(np.log(y_train.mean()), baseline_prediction)
# Test baseline for y_true = 0
y_train.fill(0.)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert_all_finite(baseline_prediction)
def test_baseline_binary_crossentropy():
rng = np.random.RandomState(0)
loss = _LOSSES['binary_crossentropy'](sample_weight=None)
for y_train in (np.zeros(shape=100), np.ones(shape=100)):
y_train = y_train.astype(np.float64)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert_all_finite(baseline_prediction)
assert np.allclose(loss.inverse_link_function(baseline_prediction),
y_train[0])
# Make sure baseline prediction is equal to link_function(p), where p
# is the proba of the positive class. We want predict_proba() to return p,
# and by definition
# p = inverse_link_function(raw_prediction) = sigmoid(raw_prediction)
# So we want raw_prediction = link_function(p) = log(p / (1 - p))
y_train = rng.randint(0, 2, size=100).astype(np.float64)
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1)
assert baseline_prediction.shape == tuple() # scalar
assert baseline_prediction.dtype == y_train.dtype
p = y_train.mean()
assert np.allclose(baseline_prediction, np.log(p / (1 - p)))
def test_baseline_categorical_crossentropy():
rng = np.random.RandomState(0)
prediction_dim = 4
loss = _LOSSES['categorical_crossentropy'](sample_weight=None)
for y_train in (np.zeros(shape=100), np.ones(shape=100)):
y_train = y_train.astype(np.float64)
baseline_prediction = loss.get_baseline_prediction(y_train, None,
prediction_dim)
assert baseline_prediction.dtype == y_train.dtype
assert_all_finite(baseline_prediction)
# Same logic as for above test. Here inverse_link_function = softmax and
# link_function = log
y_train = rng.randint(0, prediction_dim + 1, size=100).astype(np.float32)
baseline_prediction = loss.get_baseline_prediction(y_train, None,
prediction_dim)
assert baseline_prediction.shape == (prediction_dim, 1)
for k in range(prediction_dim):
p = (y_train == k).mean()
assert np.allclose(baseline_prediction[k, :], np.log(p))
@pytest.mark.parametrize('loss, problem', [
('least_squares', 'regression'),
('least_absolute_deviation', 'regression'),
('binary_crossentropy', 'classification'),
('categorical_crossentropy', 'classification'),
('poisson', 'poisson_regression'),
])
@pytest.mark.parametrize('sample_weight', ['ones', 'random'])
def test_sample_weight_multiplies_gradients(loss, problem, sample_weight):
# Make sure that passing sample weights to the gradient and hessians
# computation methods is equivalent to multiplying by the weights.
rng = np.random.RandomState(42)
n_samples = 1000
if loss == 'categorical_crossentropy':
n_classes = prediction_dim = 3
else:
n_classes = prediction_dim = 1
if problem == 'regression':
y_true = rng.normal(size=n_samples).astype(Y_DTYPE)
elif problem == 'poisson_regression':
y_true = rng.poisson(size=n_samples).astype(Y_DTYPE)
else:
y_true = rng.randint(0, n_classes, size=n_samples).astype(Y_DTYPE)
if sample_weight == 'ones':
sample_weight = np.ones(shape=n_samples, dtype=Y_DTYPE)
else:
sample_weight = rng.normal(size=n_samples).astype(Y_DTYPE)
loss_ = _LOSSES[loss](sample_weight=sample_weight)
baseline_prediction = loss_.get_baseline_prediction(
y_true, None, prediction_dim
)
raw_predictions = np.zeros(shape=(prediction_dim, n_samples),
dtype=baseline_prediction.dtype)
raw_predictions += baseline_prediction
gradients = np.empty(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
hessians = np.ones(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
loss_.update_gradients_and_hessians(gradients, hessians, y_true,
raw_predictions, None)
gradients_sw = np.empty(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
hessians_sw = np.ones(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
loss_.update_gradients_and_hessians(gradients_sw, hessians_sw, y_true,
raw_predictions, sample_weight)
assert np.allclose(gradients * sample_weight, gradients_sw)
assert np.allclose(hessians * sample_weight, hessians_sw)
def test_init_gradient_and_hessians_sample_weight():
# Make sure that passing sample_weight to a loss correctly influences the
# hessians_are_constant attribute, and consequently the shape of the
# hessians array.
prediction_dim = 2
n_samples = 5
sample_weight = None
loss = _LOSSES['least_squares'](sample_weight=sample_weight)
_, hessians = loss.init_gradients_and_hessians(
n_samples=n_samples, prediction_dim=prediction_dim,
sample_weight=None)
assert loss.hessians_are_constant
assert hessians.shape == (1, 1)
sample_weight = np.ones(n_samples)
loss = _LOSSES['least_squares'](sample_weight=sample_weight)
_, hessians = loss.init_gradients_and_hessians(
n_samples=n_samples, prediction_dim=prediction_dim,
sample_weight=sample_weight)
assert not loss.hessians_are_constant
assert hessians.shape == (prediction_dim, n_samples)
| bsd-3-clause |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/tests/test_style.py | 7 | 4469 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import shutil
import tempfile
from contextlib import contextmanager
from nose import SkipTest
from nose.tools import assert_raises
from nose.plugins.attrib import attr
import matplotlib as mpl
from matplotlib import style
from matplotlib.style.core import USER_LIBRARY_PATHS, STYLE_EXTENSION
from matplotlib.externals import six
PARAM = 'image.cmap'
VALUE = 'pink'
DUMMY_SETTINGS = {PARAM: VALUE}
@contextmanager
def temp_style(style_name, settings=None):
"""Context manager to create a style sheet in a temporary directory."""
    if settings is None:
        settings = DUMMY_SETTINGS
temp_file = '%s.%s' % (style_name, STYLE_EXTENSION)
# Write style settings to file in the temp directory.
tempdir = tempfile.mkdtemp()
with open(os.path.join(tempdir, temp_file), 'w') as f:
for k, v in six.iteritems(settings):
f.write('%s: %s' % (k, v))
# Add temp directory to style path and reload so we can access this style.
USER_LIBRARY_PATHS.append(tempdir)
style.reload_library()
try:
yield
finally:
shutil.rmtree(tempdir)
style.reload_library()
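# A minimal usage sketch of temp_style with custom settings (the style name
# and rcParam values here are illustrative only, not taken from the tests):
#
#   with temp_style('my_style', {'lines.linewidth': 3}):
#       with style.context('my_style'):
#           assert mpl.rcParams['lines.linewidth'] == 3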
def test_available():
with temp_style('_test_', DUMMY_SETTINGS):
assert '_test_' in style.available
def test_use():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
@attr('network')
def test_use_url():
with temp_style('test', DUMMY_SETTINGS):
with style.context('https://gist.github.com/adrn/6590261/raw'):
assert mpl.rcParams['axes.facecolor'] == "#adeade"
def test_context():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
    # Check that this value is reset after exiting the context.
assert mpl.rcParams[PARAM] == 'gray'
def test_context_with_dict():
original_value = 'gray'
other_value = 'blue'
mpl.rcParams[PARAM] = original_value
with style.context({PARAM: other_value}):
assert mpl.rcParams[PARAM] == other_value
assert mpl.rcParams[PARAM] == original_value
def test_context_with_dict_after_namedstyle():
# Test dict after style name where dict modifies the same parameter.
original_value = 'gray'
other_value = 'blue'
mpl.rcParams[PARAM] = original_value
with temp_style('test', DUMMY_SETTINGS):
with style.context(['test', {PARAM: other_value}]):
assert mpl.rcParams[PARAM] == other_value
assert mpl.rcParams[PARAM] == original_value
def test_context_with_dict_before_namedstyle():
# Test dict before style name where dict modifies the same parameter.
original_value = 'gray'
other_value = 'blue'
mpl.rcParams[PARAM] = original_value
with temp_style('test', DUMMY_SETTINGS):
with style.context([{PARAM: other_value}, 'test']):
assert mpl.rcParams[PARAM] == VALUE
assert mpl.rcParams[PARAM] == original_value
def test_context_with_union_of_dict_and_namedstyle():
    # Test dict after style name where dict modifies a different parameter.
original_value = 'gray'
other_param = 'text.usetex'
other_value = True
d = {other_param: other_value}
mpl.rcParams[PARAM] = original_value
mpl.rcParams[other_param] = (not other_value)
with temp_style('test', DUMMY_SETTINGS):
with style.context(['test', d]):
assert mpl.rcParams[PARAM] == VALUE
assert mpl.rcParams[other_param] == other_value
assert mpl.rcParams[PARAM] == original_value
assert mpl.rcParams[other_param] == (not other_value)
def test_context_with_badparam():
if sys.version_info[:2] >= (2, 7):
from collections import OrderedDict
else:
m = "Test can only be run in Python >= 2.7 as it requires OrderedDict"
raise SkipTest(m)
original_value = 'gray'
other_value = 'blue'
d = OrderedDict([(PARAM, original_value), ('badparam', None)])
with style.context({PARAM: other_value}):
assert mpl.rcParams[PARAM] == other_value
x = style.context([d])
assert_raises(KeyError, x.__enter__)
assert mpl.rcParams[PARAM] == other_value
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| mit |
hetland/xray | xray/test/test_backends.py | 1 | 34197 | from io import BytesIO
from threading import Lock
import contextlib
import itertools
import os.path
import pickle
import shutil
import tempfile
import unittest
import sys
import numpy as np
import pandas as pd
import xray
from xray import Dataset, open_dataset, open_mfdataset, backends, save_mfdataset
from xray.backends.common import robust_getitem
from xray.core.pycompat import iteritems, PY3
from . import (TestCase, requires_scipy, requires_netCDF4, requires_pydap,
requires_scipy_or_netCDF4, requires_dask, requires_h5netcdf,
has_netCDF4, has_scipy)
from .test_dataset import create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
try:
import dask
import dask.array as da
except ImportError:
pass
def open_example_dataset(name, *args, **kwargs):
return open_dataset(os.path.join(os.path.dirname(__file__), 'data', name),
*args, **kwargs)
def create_masked_and_scaled_data():
x = np.array([np.nan, np.nan, 10, 10.1, 10.2])
encoding = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1), 'dtype': 'i2'}
return Dataset({'x': ('t', x, {}, encoding)})
def create_encoded_masked_and_scaled_data():
attributes = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1)}
return Dataset({'x': ('t', [-1, -1, 0, 1, 2], attributes)})
class TestCommon(TestCase):
def test_robust_getitem(self):
class UnreliableArrayFailure(Exception):
pass
class UnreliableArray(object):
def __init__(self, array, failures=1):
self.array = array
self.failures = failures
def __getitem__(self, key):
if self.failures > 0:
self.failures -= 1
raise UnreliableArrayFailure
return self.array[key]
array = UnreliableArray([0])
with self.assertRaises(UnreliableArrayFailure):
array[0]
self.assertEqual(array[0], 0)
actual = robust_getitem(array, 0, catch=UnreliableArrayFailure,
initial_delay=0)
self.assertEqual(actual, 0)
class Only32BitTypes(object):
pass
class DatasetIOTestCases(object):
def create_store(self):
raise NotImplementedError
def roundtrip(self, data, **kwargs):
raise NotImplementedError
def test_zero_dimensional_variable(self):
expected = create_test_data()
expected['float_var'] = ([], 1.0e9, {'units': 'units of awesome'})
expected['string_var'] = ([], np.array('foobar', dtype='S'))
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_write_store(self):
expected = create_test_data()
with self.create_store() as store:
expected.dump_to_store(store)
# we need to cf decode the store because it has time and
# non-dimension coordinates
actual = xray.decode_cf(store)
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_test_data(self):
expected = create_test_data()
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_load(self):
expected = create_test_data()
@contextlib.contextmanager
def assert_loads(vars=None):
if vars is None:
vars = expected
with self.roundtrip(expected) as actual:
for v in actual.values():
self.assertFalse(v._in_memory)
yield actual
for k, v in actual.items():
if k in vars:
self.assertTrue(v._in_memory)
self.assertDatasetAllClose(expected, actual)
with self.assertRaises(AssertionError):
# make sure the contextmanager works!
with assert_loads() as ds:
pass
with assert_loads() as ds:
ds.load()
with assert_loads(['var1', 'dim1', 'dim2']) as ds:
ds['var1'].load()
# verify we can read data even after closing the file
with self.roundtrip(expected) as ds:
actual = ds.load()
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_None_variable(self):
expected = Dataset({None: (('x', 'y'), [[0, 1], [2, 3]])})
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_object_dtype(self):
floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
letters = np.array(['ab', 'cdef', 'g'], dtype=object)
letters_nans = np.array(['ab', 'cdef', np.nan], dtype=object)
all_nans = np.array([np.nan, np.nan], dtype=object)
original = Dataset({'floats': ('a', floats),
'floats_nans': ('a', floats_nans),
'letters': ('b', letters),
'letters_nans': ('b', letters_nans),
'all_nans': ('c', all_nans),
'nan': ([], np.nan)})
expected = original.copy(deep=True)
if isinstance(self, Only32BitTypes):
# for netCDF3 tests, expect the results to come back as characters
expected['letters_nans'] = expected['letters_nans'].astype('S')
expected['letters'] = expected['letters'].astype('S')
with self.roundtrip(original) as actual:
try:
self.assertDatasetIdentical(expected, actual)
except AssertionError:
# Most stores use '' for nans in strings, but some don't
                # first try the ideal case (where the store returns exactly
                # the original Dataset), then try a more realistic case.
# ScipyDataTest, NetCDF3ViaNetCDF4DataTest and NetCDF4DataTest
# all end up using this case.
expected['letters_nans'][-1] = ''
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_string_data(self):
expected = Dataset({'x': ('t', ['ab', 'cdef'])})
with self.roundtrip(expected) as actual:
if isinstance(self, Only32BitTypes):
expected['x'] = expected['x'].astype('S')
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_datetime_data(self):
times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
expected = Dataset({'t': ('t', times), 't0': times[0]})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_timedelta_data(self):
time_deltas = pd.to_timedelta(['1h', '2h', 'NaT'])
expected = Dataset({'td': ('td', time_deltas), 'td0': time_deltas[0]})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_example_1_netcdf(self):
expected = open_example_dataset('example_1.nc')
with self.roundtrip(expected) as actual:
# we allow the attributes to differ since that
# will depend on the encoding used. For example,
# without CF encoding 'actual' will end up with
# a dtype attribute.
self.assertDatasetEqual(expected, actual)
def test_roundtrip_coordinates(self):
original = Dataset({'foo': ('x', [0, 1])},
{'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(original, actual)
expected = original.drop('foo')
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
expected = original.copy()
expected.attrs['coordinates'] = 'something random'
with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
with self.roundtrip(expected):
pass
expected = original.copy(deep=True)
expected['foo'].attrs['coordinates'] = 'something random'
with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
with self.roundtrip(expected):
pass
def test_orthogonal_indexing(self):
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
indexers = {'dim1': np.arange(3), 'dim2': np.arange(4),
'dim3': np.arange(5)}
expected = in_memory.isel(**indexers)
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
            # do it twice, to make sure we've switched from orthogonal -> numpy
            # indexing once the values have been cached
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
def test_pickle(self):
on_disk = open_example_dataset('bears.nc')
unpickled = pickle.loads(pickle.dumps(on_disk))
self.assertDatasetIdentical(on_disk, unpickled)
class CFEncodedDataTest(DatasetIOTestCases):
def test_roundtrip_strings_with_fill_value(self):
values = np.array(['ab', 'cdef', np.nan], dtype=object)
encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')}
original = Dataset({'x': ('t', values, {}, encoding)})
expected = original.copy(deep=True)
expected['x'][:2] = values[:2].astype('S')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})})
if not isinstance(self, Only32BitTypes):
# these stores can save unicode strings
expected = original.copy(deep=True)
if type(self) in [NetCDF4DataTest, H5NetCDFDataTest]:
# netCDF4 can't keep track of an empty _FillValue for VLEN
# variables
expected['x'][-1] = ''
elif (type(self) is NetCDF3ViaNetCDF4DataTest
or (has_netCDF4 and type(self) is GenericNetCDFDataTest)):
# netCDF4 can't keep track of an empty _FillValue for nc3, either:
# https://github.com/Unidata/netcdf4-python/issues/273
expected['x'][-1] = np.string_('')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_mask_and_scale(self):
decoded = create_masked_and_scaled_data()
encoded = create_encoded_masked_and_scaled_data()
with self.roundtrip(decoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(decoded, decode_cf=False) as actual:
# TODO: this assumes that all roundtrips will first
# encode. Is that something we want to test for?
self.assertDatasetAllClose(encoded, actual)
with self.roundtrip(encoded, decode_cf=False) as actual:
self.assertDatasetAllClose(encoded, actual)
# make sure roundtrip encoding didn't change the
# original dataset.
self.assertDatasetIdentical(encoded,
create_encoded_masked_and_scaled_data())
with self.roundtrip(encoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(encoded, decode_cf=False) as actual:
self.assertDatasetAllClose(encoded, actual)
def test_coordinates_encoding(self):
def equals_latlon(obj):
return obj == 'lat lon' or obj == 'lon lat'
original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
{'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(actual, original)
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates']))
self.assertNotIn('coordinates', ds.attrs)
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
modified = original.drop(['temp', 'precip'])
with self.roundtrip(modified) as actual:
self.assertDatasetIdentical(actual, modified)
with create_tmp_file() as tmp_file:
modified.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds.attrs['coordinates']))
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
def test_roundtrip_endian(self):
ds = Dataset({'x': np.arange(3, 10, dtype='>i2'),
'y': np.arange(3, 20, dtype='<i4'),
'z': np.arange(3, 30, dtype='=i8'),
'w': ('x', np.arange(3, 10, dtype=np.float))})
with self.roundtrip(ds) as actual:
# technically these datasets are slightly different,
            # one holds mixed endian data (ds), the other should be
# all big endian (actual). assertDatasetIdentical
# should still pass though.
self.assertDatasetIdentical(ds, actual)
if type(self) is NetCDF4DataTest:
ds['z'].encoding['endian'] = 'big'
with self.assertRaises(NotImplementedError):
with self.roundtrip(ds) as actual:
pass
_counter = itertools.count()
@contextlib.contextmanager
def create_tmp_file(suffix='.nc'):
temp_dir = tempfile.mkdtemp()
path = os.path.join(temp_dir, 'temp-%s.%s' % (next(_counter), suffix))
try:
yield path
finally:
shutil.rmtree(temp_dir)
class BaseNetCDF4Test(CFEncodedDataTest):
def test_open_group(self):
# Create a netCDF file with a dataset stored within a group
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as rootgrp:
foogrp = rootgrp.createGroup('foo')
ds = foogrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo', '/foo', 'foo/', '/foo/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
# check that missing group raises appropriate exception
with self.assertRaises(IOError):
open_dataset(tmp_file, group='bar')
with self.assertRaisesRegexp(ValueError, 'must be a string'):
open_dataset(tmp_file, group=(1, 2, 3))
def test_open_subgroup(self):
# Create a netCDF file with a dataset stored within a group within a group
with create_tmp_file() as tmp_file:
rootgrp = nc4.Dataset(tmp_file, 'w')
foogrp = rootgrp.createGroup('foo')
bargrp = foogrp.createGroup('bar')
ds = bargrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
rootgrp.close()
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo/bar', '/foo/bar', 'foo/bar/', '/foo/bar/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
def test_write_groups(self):
data1 = create_test_data()
data2 = data1 * 2
with create_tmp_file() as tmp_file:
data1.to_netcdf(tmp_file, group='data/1')
data2.to_netcdf(tmp_file, group='data/2', mode='a')
with open_dataset(tmp_file, group='data/1') as actual1:
self.assertDatasetIdentical(data1, actual1)
with open_dataset(tmp_file, group='data/2') as actual2:
self.assertDatasetIdentical(data2, actual2)
def test_roundtrip_character_array(self):
with create_tmp_file() as tmp_file:
values = np.array([['a', 'b', 'c'], ['d', 'e', 'f']], dtype='S')
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 2)
nc.createDimension('string3', 3)
v = nc.createVariable('x', np.dtype('S1'), ('x', 'string3'))
v[:] = values
values = np.array(['abc', 'def'], dtype='S')
expected = Dataset({'x': ('x', values)})
with open_dataset(tmp_file) as actual:
self.assertDatasetIdentical(expected, actual)
# regression test for #157
with self.roundtrip(actual) as roundtripped:
self.assertDatasetIdentical(expected, roundtripped)
def test_default_to_char_arrays(self):
data = Dataset({'x': np.array(['foo', 'zzzz'], dtype='S')})
with self.roundtrip(data) as actual:
self.assertDatasetIdentical(data, actual)
self.assertEqual(actual['x'].dtype, np.dtype('S4'))
def test_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
expected = Dataset()
time = pd.date_range('1999-01-05', periods=10)
encoding = {'units': units, 'dtype': np.dtype('int32')}
expected['time'] = ('time', time, {}, encoding)
with open_dataset(tmp_file) as actual:
self.assertVariableEqual(actual['time'], expected['time'])
actual_encoding = dict((k, v) for k, v in iteritems(actual['time'].encoding)
if k in expected['time'].encoding)
self.assertDictEqual(actual_encoding, expected['time'].encoding)
def test_dump_and_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
with open_dataset(tmp_file) as xray_dataset:
with create_tmp_file() as tmp_file2:
xray_dataset.to_netcdf(tmp_file2)
with nc4.Dataset(tmp_file2, 'r') as ds:
self.assertEqual(ds.variables['time'].getncattr('units'), units)
self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4)
def test_compression_encoding(self):
data = create_test_data()
data['var2'].encoding.update({'zlib': True,
'chunksizes': (5, 5),
'fletcher32': True})
with self.roundtrip(data) as actual:
for k, v in iteritems(data['var2'].encoding):
self.assertEqual(v, actual['var2'].encoding[k])
# regression test for #156
expected = data.isel(dim1=0)
with self.roundtrip(expected) as actual:
self.assertDatasetEqual(expected, actual)
def test_mask_and_scale(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('t', 5)
nc.createVariable('x', 'int16', ('t',), fill_value=-1)
v = nc.variables['x']
v.set_auto_maskandscale(False)
v.add_offset = 10
v.scale_factor = 0.1
v[:] = np.array([-1, -1, 0, 1, 2])
# first make sure netCDF4 reads the masked and scaled data correctly
with nc4.Dataset(tmp_file, mode='r') as nc:
expected = np.ma.array([-1, -1, 10, 10.1, 10.2],
mask=[True, True, False, False, False])
actual = nc.variables['x'][:]
self.assertArrayEqual(expected, actual)
# now check xray
with open_dataset(tmp_file) as ds:
expected = create_masked_and_scaled_data()
self.assertDatasetIdentical(expected, ds)
def test_0dimensional_variable(self):
# This fix verifies our work-around to this netCDF4-python bug:
# https://github.com/Unidata/netcdf4-python/pull/220
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
v = nc.createVariable('x', 'int16')
v[...] = 123
with open_dataset(tmp_file) as ds:
expected = Dataset({'x': ((), 123)})
self.assertDatasetIdentical(expected, ds)
def test_variable_len_strings(self):
with create_tmp_file() as tmp_file:
values = np.array(['foo', 'bar', 'baz'], dtype=object)
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 3)
v = nc.createVariable('x', str, ('x',))
v[:] = values
expected = Dataset({'x': ('x', values)})
for kwargs in [{}, {'decode_cf': True}]:
with open_dataset(tmp_file, **kwargs) as actual:
self.assertDatasetIdentical(expected, actual)
@requires_netCDF4
class NetCDF4DataTest(BaseNetCDF4Test, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore(tmp_file, mode='w') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with open_dataset(tmp_file, **kwargs) as ds:
yield ds
@requires_scipy
class ScipyInMemoryDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
fobj = BytesIO()
yield backends.ScipyDataStore(fobj, 'w')
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
serialized = data.to_netcdf()
with open_dataset(BytesIO(serialized), **kwargs) as ds:
yield ds
@requires_scipy
class ScipyOnDiskDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.ScipyDataStore(tmp_file, mode='w') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine='scipy')
with open_dataset(tmp_file, engine='scipy', **kwargs) as ds:
yield ds
def test_array_attrs(self):
ds = Dataset(attrs={'foo': [[1, 2], [3, 4]]})
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
with self.roundtrip(ds) as roundtripped:
pass
def test_roundtrip_example_1_netcdf_gz(self):
if sys.version_info[:2] < (2, 7):
with self.assertRaisesRegexp(ValueError,
'gzipped netCDF not supported'):
open_example_dataset('example_1.nc.gz')
else:
with open_example_dataset('example_1.nc.gz') as expected:
with open_example_dataset('example_1.nc') as actual:
self.assertDatasetIdentical(expected, actual)
def test_netcdf3_endianness(self):
# regression test for GH416
expected = open_example_dataset('bears.nc', engine='scipy')
for var in expected.values():
self.assertTrue(var.dtype.isnative)
@requires_netCDF4
class NetCDF3ViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore(tmp_file, mode='w',
format='NETCDF3_CLASSIC') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format='NETCDF3_CLASSIC',
engine='netcdf4')
with open_dataset(tmp_file, engine='netcdf4', **kwargs) as ds:
yield ds
@requires_scipy_or_netCDF4
class GenericNetCDFDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
# verify that we can read and write netCDF3 files as long as we have scipy
# or netCDF4-python installed
def test_write_store(self):
# there's no specific store to test here
pass
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format='netcdf3_64bit')
with open_dataset(tmp_file, **kwargs) as ds:
yield ds
def test_engine(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
data.to_netcdf('foo.nc', engine='foobar')
with self.assertRaisesRegexp(ValueError, 'invalid engine'):
data.to_netcdf(engine='netcdf4')
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
open_dataset(tmp_file, engine='foobar')
netcdf_bytes = data.to_netcdf()
with self.assertRaisesRegexp(ValueError, 'can only read'):
open_dataset(BytesIO(netcdf_bytes), engine='foobar')
def test_cross_engine_read_write(self):
data = create_test_data()
valid_engines = set()
if has_netCDF4:
valid_engines.add('netcdf4')
if has_scipy:
valid_engines.add('scipy')
for write_engine in valid_engines:
for format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format=format,
engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file,
engine=read_engine) as actual:
self.assertDatasetAllClose(data, actual)
@requires_h5netcdf
@requires_netCDF4
class H5NetCDFDataTest(BaseNetCDF4Test, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
yield backends.H5NetCDFStore(tmp_file, 'w')
@contextlib.contextmanager
def roundtrip(self, data, **kwargs):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine='h5netcdf')
with open_dataset(tmp_file, engine='h5netcdf', **kwargs) as ds:
yield ds
def test_orthogonal_indexing(self):
# doesn't work for h5py (without using dask as an intermediate layer)
pass
@requires_dask
@requires_netCDF4
class DaskTest(TestCase):
def test_open_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2]) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks,
((5, 5),))
self.assertDatasetAllClose(original, actual)
with open_mfdataset([tmp1, tmp2], chunks={'x': 3}) as actual:
self.assertEqual(actual.foo.variable.data.chunks,
((3, 2, 3, 2),))
with self.assertRaisesRegexp(IOError, 'no files to open'):
open_mfdataset('foo-bar-baz-*.nc')
def test_preprocess_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
preprocess = lambda ds: ds.assign_coords(z=0)
expected = preprocess(original)
with open_mfdataset(tmp, preprocess=preprocess) as actual:
self.assertDatasetIdentical(expected, actual)
def test_lock(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp, chunks=10) as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertIsInstance(task[-1], type(Lock()))
with open_mfdataset(tmp) as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertIsInstance(task[-1], type(Lock()))
def test_save_mfdataset_roundtrip(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
datasets = [original.isel(x=slice(5)),
original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset([tmp1, tmp2]) as actual:
self.assertDatasetIdentical(actual, original)
def test_save_mfdataset_invalid(self):
ds = Dataset()
with self.assertRaisesRegexp(ValueError, 'cannot use mode'):
save_mfdataset([ds, ds], ['same', 'same'])
with self.assertRaisesRegexp(ValueError, 'same length'):
save_mfdataset([ds, ds], ['only one path'])
def test_open_and_do_math(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset(tmp) as ds:
actual = 1.0 * ds
self.assertDatasetAllClose(original, actual)
def test_open_dataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp, chunks={'x': 5}) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),))
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp, chunks=5) as actual:
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp) as actual:
self.assertIsInstance(actual.foo.variable.data, np.ndarray)
self.assertDatasetIdentical(original, actual)
def test_dask_roundtrip(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
chunks = {'dim1': 4, 'dim2': 4, 'dim3': 4, 'time': 10}
with open_dataset(tmp, chunks=chunks) as dask_ds:
self.assertDatasetIdentical(data, dask_ds)
with create_tmp_file() as tmp2:
dask_ds.to_netcdf(tmp2)
with open_dataset(tmp2) as on_disk:
self.assertDatasetIdentical(data, on_disk)
@requires_scipy_or_netCDF4
@requires_pydap
class PydapTest(TestCase):
def test_cmp_local_file(self):
url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc'
@contextlib.contextmanager
def create_datasets():
actual = open_dataset(url, engine='pydap')
with open_example_dataset('bears.nc') as expected:
# don't check attributes since pydap doesn't serialize them
                # correctly; also skip the "bears" variable since the test DAP
# server incorrectly concatenates it.
actual = actual.drop('bears')
expected = expected.drop('bears')
yield actual, expected
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual, expected)
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(l=2), expected.isel(l=2))
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(i=0, j=-1),
expected.isel(i=0, j=-1))
with create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(j=slice(1, 2)),
expected.isel(j=slice(1, 2)))
| apache-2.0 |
ericmjl/bokeh | examples/app/stocks/main.py | 1 | 4135 | ''' Create a simple stocks correlation dashboard.
Choose stocks to compare in the drop down widgets, and make selections
on the plots to update the summary and histograms accordingly.
.. note::
Running this example requires downloading sample data. See
the included `README`_ for more information.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve stocks
at your command prompt. Then navigate to the URL
http://localhost:5006/stocks
.. _README: https://github.com/bokeh/bokeh/blob/master/examples/app/stocks/README.md
'''
from functools import lru_cache
from os.path import dirname, join
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, PreText, Select
from bokeh.plotting import figure
DATA_DIR = join(dirname(__file__), 'daily')
DEFAULT_TICKERS = ['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
def nix(val, lst):
return [x for x in lst if x != val]
@lru_cache()
def load_ticker(ticker):
fname = join(DATA_DIR, 'table_%s.csv' % ticker.lower())
data = pd.read_csv(fname, header=None, parse_dates=['date'],
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'])
data = data.set_index('date')
return pd.DataFrame({ticker: data.c, ticker+'_returns': data.c.diff()})
@lru_cache()
def get_data(t1, t2):
df1 = load_ticker(t1)
df2 = load_ticker(t2)
data = pd.concat([df1, df2], axis=1)
data = data.dropna()
data['t1'] = data[t1]
data['t2'] = data[t2]
data['t1_returns'] = data[t1+'_returns']
data['t2_returns'] = data[t2+'_returns']
return data
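# Illustrative note: the frame returned above is indexed by date and contains
# the raw close columns (e.g. 'AAPL', 'GOOG'), their '_returns' diffs, plus
# the generic aliases 't1', 't2', 't1_returns', 't2_returns' consumed by the
# plots and stats below.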
# set up widgets
stats = PreText(text='', width=500)
ticker1 = Select(value='AAPL', options=nix('GOOG', DEFAULT_TICKERS))
ticker2 = Select(value='GOOG', options=nix('AAPL', DEFAULT_TICKERS))
# set up plots
source = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
source_static = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
tools = 'pan,wheel_zoom,xbox_select,reset'
corr = figure(plot_width=350, plot_height=350,
tools='pan,wheel_zoom,box_select,reset')
corr.circle('t1_returns', 't2_returns', size=2, source=source,
selection_color="orange", alpha=0.6, nonselection_alpha=0.1, selection_alpha=0.4)
ts1 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
ts1.line('date', 't1', source=source_static)
ts1.circle('date', 't1', size=1, source=source, color=None, selection_color="orange")
ts2 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
ts2.x_range = ts1.x_range
ts2.line('date', 't2', source=source_static)
ts2.circle('date', 't2', size=1, source=source, color=None, selection_color="orange")
# set up callbacks
def ticker1_change(attrname, old, new):
ticker2.options = nix(new, DEFAULT_TICKERS)
update()
def ticker2_change(attrname, old, new):
ticker1.options = nix(new, DEFAULT_TICKERS)
update()
def update(selected=None):
t1, t2 = ticker1.value, ticker2.value
df = get_data(t1, t2)
data = df[['t1', 't2', 't1_returns', 't2_returns']]
source.data = data
source_static.data = data
update_stats(df, t1, t2)
corr.title.text = '%s returns vs. %s returns' % (t1, t2)
ts1.title.text, ts2.title.text = t1, t2
def update_stats(data, t1, t2):
stats.text = str(data[[t1, t2, t1+'_returns', t2+'_returns']].describe())
ticker1.on_change('value', ticker1_change)
ticker2.on_change('value', ticker2_change)
def selection_change(attrname, old, new):
t1, t2 = ticker1.value, ticker2.value
data = get_data(t1, t2)
selected = source.selected.indices
if selected:
data = data.iloc[selected, :]
update_stats(data, t1, t2)
source.selected.on_change('indices', selection_change)
# set up layout
widgets = column(ticker1, ticker2, stats)
main_row = row(corr, widgets)
series = column(ts1, ts2)
layout = column(main_row, series)
# initialize
update()
curdoc().add_root(layout)
curdoc().title = "Stocks"
| bsd-3-clause |
Gillu13/scipy | doc/source/tutorial/examples/newton_krylov_preconditioning.py | 99 | 2489 | import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def get_preconditioner():
"""Compute the preconditioner M"""
diags_x = zeros((3, nx))
diags_x[0,:] = 1/hx/hx
diags_x[1,:] = -2/hx/hx
diags_x[2,:] = 1/hx/hx
Lx = spdiags(diags_x, [-1,0,1], nx, nx)
diags_y = zeros((3, ny))
diags_y[0,:] = 1/hy/hy
diags_y[1,:] = -2/hy/hy
diags_y[2,:] = 1/hy/hy
Ly = spdiags(diags_y, [-1,0,1], ny, ny)
J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)
# Now we have the matrix `J_1`. We need to find its inverse `M` --
# however, since an approximate inverse is enough, we can use
# the *incomplete LU* decomposition
J1_ilu = spilu(J1)
# This returns an object with a method .solve() that evaluates
# the corresponding matrix-vector product. We need to wrap it into
# a LinearOperator before it can be passed to the Krylov methods:
M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
return M
def solve(preconditioning=True):
"""Compute the solution"""
count = [0]
def residual(P):
count[0] += 1
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*cosh(P).mean()**2
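    # Note: the residual above is a centered finite-difference discretization
    # of  d2P/dx2 + d2P/dy2 + 5*(mean of cosh(P))**2 = 0  on the unit square,
    # with Dirichlet boundary values P_left, P_right, P_top and P_bottom.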
# preconditioner
if preconditioning:
M = get_preconditioner()
else:
M = None
# solve
guess = zeros((nx, ny), float)
sol = root(residual, guess, method='krylov',
options={'disp': True,
'jac_options': {'inner_M': M}})
    print('Residual', abs(residual(sol.x)).max())
    print('Evaluations', count[0])
return sol.x
def main():
sol = solve(preconditioning=True)
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.clf()
plt.pcolor(x, y, sol)
plt.clim(0, 1)
plt.colorbar()
plt.show()
if __name__ == "__main__":
main()
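# Illustrative comparison (an assumption about typical behaviour, not output
# produced by this file): calling solve(preconditioning=False) runs the same
# Krylov solver without the ILU preconditioner, and the 'Evaluations' count
# printed by solve() is then usually far higher -- which is the point of
# building M in the first place.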
| bsd-3-clause |
dbracewell/pyHermes | tweet_analyze.py | 1 | 1469 | import csv
import regex as re
import gensim
import pandas as pd
hashtag_pattern = re.compile('#[^\s\p{P}]+')
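# For example (illustrative only), a tweet such as "Listening to #LoFi and
# #jazz!" yields the hashtags ["#LoFi", "#jazz"], which are lower-cased
# before being fed to the gensim dictionary below.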
dictionary = gensim.corpora.Dictionary()
texts = []
with open('/home/dbb/PycharmProjects/twitter_crawler/music.csv') as rdr:
    reader = csv.reader(rdr)
    for row in reader:
if len(row) > 0:
text = row[0]
tags = [t.lower() for t in hashtag_pattern.findall(text)]
if len(tags) > 0:
texts.append(dictionary.doc2bow(tags, allow_update=True))
lda_model = gensim.models.LdaModel(corpus=texts, id2word=dictionary, alpha='auto', num_topics=50, iterations=500)
for i in range(lda_model.num_topics):
print([x[0] for x in lda_model.show_topic(i)])
def topic_prob_extractor(gensim_hdp):
shown_topics = gensim_hdp.show_topics(num_topics=-1, formatted=False)
topics_nos = [x[0] for x in shown_topics]
weights = [sum([item[1] for item in shown_topics[topicN][1]]) for topicN in topics_nos]
return pd.DataFrame({'topic_id': topics_nos, 'weight': weights})
hdp_model = gensim.models.HdpModel(corpus=texts, id2word=dictionary, T=20)
df = topic_prob_extractor(hdp_model)
for row in df.iterrows():
print(row[1]['topic_id'], row[1]['weight'])
# for topic in lda_model.show_topics(num_topics=-1, num_words=10):
# id = topic[0]
# words = topic[1]
# wout = []
# for w in words.split(' '):
# if '*' in w:
# wout.append(w.split('*')[1])
# print(id, wout)
| apache-2.0 |
alexgorban/models | research/namignizer/data_utils.py | 19 | 4238 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for parsing Kaggle baby names files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import tensorflow as tf
import pandas as pd
# the default end of name rep will be zero
_EON = 0
def read_names(names_path):
"""read data from downloaded file. See SmallNames.txt for example format
or go to https://www.kaggle.com/kaggle/us-baby-names for full lists
Args:
names_path: path to the csv file similar to the example type
Returns:
Dataset: a namedtuple of two elements: deduped names and their associated
counts. The names contain only 26 chars and are all lower case
"""
names_data = pd.read_csv(names_path)
names_data.Name = names_data.Name.str.lower()
name_data = names_data.groupby(by=["Name"])["Count"].sum()
name_counts = np.array(name_data.tolist())
names_deduped = np.array(name_data.index.tolist())
Dataset = collections.namedtuple('Dataset', ['Name', 'Count'])
return Dataset(names_deduped, name_counts)
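# A minimal sketch of the CSV layout assumed above (header and values are
# illustrative; only the "Name" and "Count" columns are actually used):
#
#   Id,Name,Year,Gender,Count
#   1,Mary,1880,F,7065
#   2,Anna,1880,F,2604
#
# read_names("SmallNames.txt") then returns the deduped, lower-cased names in
# .Name and their summed counts in .Count.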
def _letter_to_number(letter):
"""converts letters to numbers between 1 and 27"""
# ord of lower case 'a' is 97
return ord(letter) - 96
def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size):
"""Takes a list of names and counts like those output from read_names, and
makes an iterator yielding a batch_size by num_steps array of random names
separated by an end of name token. The names are chosen randomly according
to their counts. The batch may end mid-name
Args:
names: a set of lowercase names composed of 26 characters
counts: a list of the frequency of those names
batch_size: int
num_steps: int
epoch_size: number of batches to yield
Yields:
(x, y): a batch_size by num_steps array of ints representing letters, where
x will be the input and y will be the target
"""
name_distribution = counts / counts.sum()
for i in range(epoch_size):
data = np.zeros(batch_size * num_steps + 1)
samples = np.random.choice(names, size=batch_size * num_steps // 2,
replace=True, p=name_distribution)
data_index = 0
for sample in samples:
if data_index >= batch_size * num_steps:
break
            for letter in list(map(_letter_to_number, sample)) + [_EON]:
if data_index >= batch_size * num_steps:
break
data[data_index] = letter
data_index += 1
x = data[:batch_size * num_steps].reshape((batch_size, num_steps))
y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps))
yield (x, y)
def name_to_batch(name, batch_size, num_steps):
""" Takes a single name and fills a batch with it
Args:
name: lowercase composed of 26 characters
batch_size: int
num_steps: int
Returns:
x, y: a batch_size by num_steps array of ints representing letters, where
x will be the input and y will be the target. The array is filled up
to the length of the string, the rest is filled with zeros
"""
data = np.zeros(batch_size * num_steps + 1)
data_index = 0
    for letter in list(map(_letter_to_number, name)) + [_EON]:
data[data_index] = letter
data_index += 1
x = data[:batch_size * num_steps].reshape((batch_size, num_steps))
y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps))
return x, y
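# End-to-end usage sketch (file path and sizes are hypothetical, chosen for
# illustration only):
#
#   names = read_names("SmallNames.txt")
#   for x, y in namignizer_iterator(names.Name, names.Count, batch_size=32,
#                                   num_steps=20, epoch_size=100):
#       pass  # feed the int-encoded (x, y) batch to the RNN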
| apache-2.0 |
lazywei/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and can let any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 62 | 6762 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(data)
clf.set_params(penalty="l1")
clf.fit(X, y)
X_new = assert_warns(
DeprecationWarning, clf.transform, X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, y)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == y), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
def test_warm_start():
est = PassiveAggressiveClassifier(warm_start=True, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
old_model = transformer.estimator_
transformer.fit(data, y)
new_model = transformer.estimator_
assert_true(old_model is new_model)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
    # and fitting an unfit model with prefit=False should give the same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold=0.1)
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = 1.0
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
evgchz/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
robbymeals/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
fungs/mglex | mglex/common.py | 1 | 34050 | # This file is subject to the terms and conditions of the GPLv3 (see file 'LICENSE' as part of this source code package)
u"""
This file contains helper functions and types.
"""
__author__ = "[email protected]"
from . import types
import numpy as np
import math
from numpy.testing import assert_approx_equal
from scipy.special import binom
from operator import itemgetter
from itertools import count, filterfalse, chain
from collections import defaultdict, deque
from sys import stderr, stdout
import pickle
from scipy.special import gammaln # TODO: clear dependency on scipy
def parse_lines(lines):
for line in lines:
if not line or line[0] == "#": # skip empty lines and comments
continue
yield line.rstrip()
load_data = lambda lines, store: store.parse(parse_lines(lines))
load_data_file = lambda filename, store: load_data(open(filename, "r"), store)
def assert_probmatrix(mat):
is_sum = mat.sum(dtype=np.float32)
should_sum = mat.shape[0]
assert_approx_equal(is_sum, should_sum, significant=0)
[assert_approx_equal(rowsum, 1., significant=1) for rowsum in mat.sum(axis=1, dtype=np.float32)]
def assert_probmatrix_relaxed(mat): # accepts matrices with all-nan rows (invalid training data for class etc.)
mask = ~np.all(np.isnan(mat), axis=1, keepdims=False)
mat = mat.compress(mask, axis=0)
assert_probmatrix(mat)
def approx_equal(v1, v2, precision):
if type(v1) == type(v2) == np.ndarray:
if v1.shape != v2.shape:
return False
return (abs(v1-v2) < precision).all()
return abs(v1-v2) < precision
assert_probarray = lambda v: assert_approx_equal(v.sum(), 1.)
def random_probarray(size): # TODO: refine
tmp = np.random.rand(size)
return tmp/tmp.sum()
def set_random_seed(seed):
np.random.seed(seed)
def argmax(s, n=1):
get_second = itemgetter(1)
max_store = sorted(list(enumerate(s[:n])), key=get_second, reverse=True)
for e in zip(count(n), s[n:]):
max_store = sorted(max_store + [e], key=get_second, reverse=True)[:n]
if n == 1:
return max_store[0]
return max_store
def logbinom(n, k):
return gammaln(n+1) - gammaln(k+1) - gammaln(n-k+1)
def logmultinom(n, k):
return gammaln(n+1) - np.sum(gammaln(k+1), axis=1, keepdims=True)
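# Illustrative sketch (not part of the original module): after exponentiation,
# logbinom() should agree with scipy's binom(), e.g. exp(logbinom(5, 2)) == 10.
def _example_logbinom():
    expected = binom(5, 2)  # 10.0, computed directly by scipy.special.binom
    assert np.isclose(np.exp(logbinom(5, 2)), expected)
    return expected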
def nandot(a, b): # TODO: speed up, avoid copying data
"A numpy.dot() replacement which treats (0*-Inf)==0 and works around BLAS NaN bugs in matrices."
# important note: a contains zeros and b contains inf/-inf/nan, not the other way around
# workaround for zero*-inf=nan in dot product (must be 0 according to 0^0=1 with probabilities)
# 1) calculate dot product
# 2) select nan entries
# 3) re-calculate matrix entries where 0*inf = 0 using np.nansum()
tmp = np.dot(a, b)
indices = np.where(np.isnan(tmp))
ri, ci = indices
with np.errstate(invalid='ignore'):
values = np.nansum(a[ri, :] * b[:, ci].T, axis=1)
values[np.isnan(values)] = 0.0
tmp[indices] = values
return tmp
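# Illustrative sketch (not part of the original module): where a zero in `a`
# meets -inf in `b`, a plain np.dot() propagates nan, while nandot() treats the
# product as zero as required for probabilities (0^0 == 1 in log space).
def _example_nandot():
    a = np.array([[0.0, 1.0]])
    b = np.array([[-np.inf], [2.0]])
    fixed = nandot(a, b)  # the 0 * -inf term contributes nothing
    assert fixed[0, 0] == 2.0
    return fixed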
flat_priors = lambda n: np.repeat(1./n, n)
def total_likelihood_inplace(log_mat):
correction = np.max(log_mat, axis=1, keepdims=True) # tiny number correction
log_mat -= correction
l_per_datum = np.exp(log_mat).sum(axis=1)
log_vec = np.log(l_per_datum) + correction
return log_vec.sum()
def total_likelihood(log_mat):
correction = np.max(log_mat, axis=1, keepdims=True) # tiny number correction
tmp = log_mat - correction
l_per_datum = np.exp(tmp).sum(axis=1)
log_vec = np.log(l_per_datum) + correction
return log_vec.sum()
def exp_normalize_inplace(data):  # important: works in-place
    data -= data.max(axis=1, keepdims=True)  # avoid tiny numbers
    np.exp(data, out=data)
    data /= data.sum(axis=1, keepdims=True)
    return data
def exp_normalize(data):
ret = data - np.amax(data, axis=1, keepdims=True) # avoid tiny numbers
ret = np.exp(ret)
ret /= np.sum(ret, axis=1, keepdims=True)
return ret
def exp_normalize_1d_inplace(data):  # important: works in-place
    data -= data.max()  # avoid tiny numbers
    np.exp(data, out=data)
    data /= data.sum()
    return data
def exp_normalize_1d(data):
ret = data - data.max() # avoid tiny numbers
ret = np.exp(ret)
ret /= ret.sum()
return ret
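# Illustrative sketch (not part of the original module): exp_normalize() turns
# rows of log-likelihoods into probability rows summing to one (a row-wise softmax).
def _example_exp_normalize():
    loglike = np.log(np.array([[1.0, 3.0], [2.0, 2.0]]))
    probs = exp_normalize(loglike)
    assert np.allclose(probs.sum(axis=1), 1.0)
    assert np.allclose(probs[0], [0.25, 0.75])
    return probs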
swapindex_2d = [1, 0]
def weighted_std_matrix(data, weights, dtype=types.large_float_type, shrink_matrix=True): # TODO adjust return of NaN and zero
"""Weighted standard deviation using numpy masked arrays"""
assert weights.shape == data.shape
max_valid_value = np.floor(np.sqrt(np.finfo(data.dtype).max))
# print("max value:", np.abs(data).max(), file=stderr)
# shrink and copy original data
data_weighted_var = np.zeros(data.shape[1], dtype=dtype)
d = data
w = weights
m = ~np.logical_and(w, np.isfinite(d))
if shrink_matrix:
print(d.shape, w.shape, m.shape, file=stderr)
select = ~np.all(m, axis=1)
if np.any(select):
d = np.compress(select, d, axis=0)
w = np.compress(select, w, axis=0)
m = np.compress(select, m, axis=0)
print(d.shape, w.shape, m.shape, file=stderr)
select = ~np.all(m, axis=0)
if np.any(select):
d = np.compress(select, d, axis=1)
w = np.compress(select, w, axis=1)
m = np.compress(select, m, axis=1)
print(d.shape, w.shape, m.shape, file=stderr)
else:
d = d.copy()
select = np.ones(data.shape[1], dtype=np.bool_)
assert d.shape == m.shape
d = np.ma.MaskedArray(d, mask=m)
w = np.ma.MaskedArray(w, mask=m)
# print("max value:", np.ma.abs(d).max(fill_value=0.0), file=stderr)
# d -= np.ma.mean(d, dtype=types.large_float_type) # TODO: enable if overflow error in weighted mean calculation
# print("max value:", np.ma.abs(d).max(fill_value=0.0), file=stderr)
weight_sums = w.sum(dtype=types.large_float_type, axis=0)
# d -= np.ma.average(np.ma.MaskedArray(d, dtype=types.large_float_type), weights=w, axis=0) # TODO: avoid cast
with np.errstate(invalid='ignore'):
d -= np.ma.sum(d * w, axis=0)/weight_sums
# print("max value:", np.ma.abs(d).max(fill_value=0.0), file=stderr)
max_value = np.ma.abs(d).max(fill_value=0.0)
if max_value > max_valid_value:
shrink_divisor = max_value/(max_valid_value-1.0)
# print("shrink divisor:", shrink_divisor, file=stderr)
with np.errstate(invalid='ignore'):
d /= shrink_divisor
# print("max value after shrinking:", np.ma.abs(d).max(fill_value=0.0))
# print_probvector(d[~m].flatten(), file=stderr)
assert np.ma.abs(d).max() <= max_valid_value
else:
shrink_divisor = 1.0
try:
with np.errstate(over='raise'):
d **= 2
except FloatingPointError:
stderr.write("Error: overflow in squared vector.\n")
assert np.all(np.isfinite(d))
variance_divisor = weight_sums - ((w**2).sum(dtype=dtype, axis=0)/weight_sums) # replaces weight_sums in biased std
with np.errstate(invalid='raise'):
try:
# data_weighted_var = shrink_divisor**2 * np.ma.average(np.ma.array(d, dtype=types.large_float_type), weights=w, axis=0)
data_weighted_var[select] = np.ma.sqrt(np.ma.sum(d*w, axis=0)) * (shrink_divisor/np.sqrt(variance_divisor))
except FloatingPointError:
stderr.write("Error: probable overflow in np.average.\n")
raise FloatingPointError
assert np.all(data_weighted_var >= 0.0)
return data_weighted_var
def weighted_std_iterative(data, weights, dtype=types.large_float_type):
"""Unbiased weighted standard deviation using iteration over columns, returns NaN if number of valid samples is < 2"""
# unbiased version for reliabilty weights: https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
assert weights.shape == data.shape
axis = 0
original_dtype = data.dtype
if dtype is None:
dtype = original_dtype
axis = swapindex_2d[axis] # TODO: remove
max_valid_value = np.floor(np.sqrt(np.finfo(data.dtype).max))
data_weighted_var = np.empty(data.shape[axis], dtype=types.large_float_type)
# data_weighted_var_mask = np.empty(data.shape[axis], dtype=np.bool_)
for i, d, w in zip(count(0), np.rollaxis(data, axis), np.rollaxis(weights, axis)):
# ignore nan or infinity values
m = np.isfinite(d)
if sum(m) < 2:
data_weighted_var[i] = np.nan
# data_weighted_var_mask[i] = True
continue
np.logical_and(m, w, out=m)
d = d[m] # create memory copy
if d.size < 2:
data_weighted_var[i] = 0.0
continue
w = w[m] # create memory copy
weight_sum = w.sum(dtype=dtype)
if not weight_sum:
data_weighted_var[i] = 0.0
continue
# d -= np.mean(d, dtype=types.large_float_type) # TODO: enable if overflow error in weighted mean calculation
d -= (d*w).sum(dtype=dtype)/weight_sum
max_value = np.abs(d).max()
if max_value > max_valid_value:
shrink_divisor = max_value/(max_valid_value-1.0)
# print("shrink divisor:", shrink_divisor, file=stderr)
d /= shrink_divisor
# print("max value after shrinking:", np.abs(d).max())
assert np.ma.abs(d).max() <= max_valid_value
else:
shrink_divisor = 1.0
try:
with np.errstate(over='raise'):
# print("Min-Max square:", v.min(), v.max(), file=stderr)
d **= 2
except FloatingPointError:
stderr.write("Error in weighted variance calculation: overflow in squared vector.\n")
raise FloatingPointError
variance_divisor = weight_sum - ((w**2).sum(dtype=dtype)/weight_sum) # replaces weight_sums in biased std
with np.errstate(over='raise'):
try:
data_weighted_var[i] = np.sqrt((d*w).sum(dtype=dtype)) * (shrink_divisor/np.sqrt(variance_divisor)) #np.average(np.array(d, dtype=types.large_float_type), weights=w)
except FloatingPointError:
stderr.write("Error in weighted variance calculation: probable overflow in weights*coverage calculation.\n")
raise FloatingPointError
# data_weighted_var[i] = np.inf
assert data_weighted_var[i] >= 0.0
# data_weighted_var_mask[i] = False
# print_probvector(data_weighted_var, file=stderr)
# return np.ma.MaskedArray(data_weighted_var, mask=data_weighted_var_mask)
return data_weighted_var
weighted_std = weighted_std_iterative
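# Illustrative sketch (not part of the original module): with all-ones weights,
# weighted_std() reduces to numpy's unbiased column-wise standard deviation.
def _example_weighted_std():
    data = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    weights = np.ones_like(data)
    assert np.allclose(weighted_std(data, weights), np.std(data, axis=0, ddof=1))
    return weighted_std(data, weights)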
def log_fac(i):
    """Return log(i!) as the sum of logarithms log(i) + log(i-1) + ... + log(1)."""
    r = .0
    while i > 0:
        r += np.log(i)
        i -= 1
    return r
def seeds2indices(seqnames, seeds): # TODO: deprecated -> remove
# a) build a dictionary for the seeds for fast lookup
name2cluster = {}
cluster_count = 0
for i, names in enumerate(seeds):
for n in names:
name2cluster[n] = i
cluster_count += 1
seed_indices = [set() for i in range(cluster_count)]
# b) determine indices of seeds
for i, name in enumerate(seqnames):
cluster_index = name2cluster.get(name, None)
if cluster_index is not None:
seed_indices[cluster_index].add(i)
return seed_indices
def responsibilities_from_seeds(seed_indices, num_data): # TODO: deprecated -> remove
responsibilities = np.zeros((num_data, len(seed_indices)), dtype=types.prob_type)
for i, s in enumerate(seed_indices):
responsibilities[list(s), i] = 1. # TODO: index with numpy array instead of list?
return responsibilities
def seeds2classindex(seeds):
name2cluster = {}
for i, names in enumerate(seeds):
for n in names:
name2cluster[n] = i
return name2cluster
def seeds2responsibility_iter(seqnames, seeds):
seeds = list(seeds)
lookup = seeds2classindex(seeds)
template = np.repeat(types.logprob_type('-inf'), len(seeds))
for name in seqnames:
index = lookup.get(name, None)
row = template.copy()
if index is not None:
row[index] = 0.
yield row
# def responsibilities_from_seeds(data, seeds):
# num_clusters = len(seeds)
# num_data = data.num_data
#
# # a) build a dictionary for the seeds for fast lookup
# name2cluster = {}
# for i, names in enumerate(seeds):
# for n in names:
# name2cluster[n] = i
#
# # b) construct zero-filled responsibility matrix
# responsibilities = np.zeros((num_data, num_clusters), dtype=prob_type)
#
# # c) fill in ones for seeds into responsibilities
# for name, row in zip(data.names, responsibilities):
# cluster_index = name2cluster.get(name, None)
# if cluster_index is not None:
# # print >>stderr, "assigning", name, "to cluster", cluster_index
# row[cluster_index] = 1.
#
# seqs_per_component = responsibilities.sum(axis=0)
# print >>stderr, "number of contigs per component", seqs_per_component
# assert(all(seqs_per_component))
#
# return responsibilities
def load_seeds(iterable):
for line in iterable:
if line and line[0] == "#": # TODO: factorize
continue
yield line.rstrip().split(" ")
load_seeds_file = lambda filename: load_seeds(open(filename, "r"))
load_seqlens_iter = lambda lines: (types.seqlen_type(line.rstrip()) for line in lines)
load_seqlens = lambda lines: np.fromiter(load_seqlens_iter(lines), dtype=types.seqlen_type)[:, np.newaxis]
load_seqlens_file = lambda filename: load_seqlens(open(filename, "r"))
load_seqnames_iter = lambda lines: (line.rstrip() for line in lines)
load_seqnames_file = lambda filename: load_seqnames_iter(open(filename, "r"))
load_model = pickle.load
load_model_file = lambda filename: load_model(open(filename, "rb"))
write_model = pickle.dump
write_model_file = lambda model, filename: write_model(model, open(filename, "wb"))
def load_matrix_iter(dtype):
return lambda lines: (np.array(line.split("\t"), dtype=dtype) for line in lines)
def load_matrix(lines, dtype):
return np.vstack(load_matrix_iter(dtype)(lines))
def load_matrix_file(filename, dtype):
return load_matrix(open(filename, "r"), dtype)
def write_matrix_iter(rows, file=stdout, precision=2, dtype=None, trans=None):
format = "%%.%if" % precision # TODO: use new format capabilities
if not trans:
trans = lambda x: x # identity
for row in map(lambda x: np.asarray(x, dtype), rows):
file.write("\t".join([format % i for i in trans(row)]))
file.write("\n")
def write_matrix(mat, dtype=None, **kw):
mat = np.asarray(mat, dtype=dtype)
write_matrix_iter(mat, dtype=None, **kw)
write_matrix_file = lambda mat, filename, **kw: write_matrix(mat, open(filename, "w"), **kw)
load_probmatrix_iter = load_matrix_iter(dtype=types.logprob_type)
load_probmatrix = lambda lines: -np.vstack(load_probmatrix_iter(lines))
load_probmatrix_file = lambda filename: load_probmatrix(open(filename, "r"))
write_probmatrix_iter = lambda rows, file=stdout, precision=2: write_matrix_iter(rows, file=file, precision=precision, dtype=types.logprob_type, trans=np.abs)
write_probmatrix = lambda mat, file=stdout, precision=2: write_matrix(mat, precision=precision, dtype=types.logprob_type, trans=np.abs)
write_probmatrix_file = lambda mat, filename, precision=2: write_probmatrix(mat, open(filename, "w"))
colors_dict = {
'automatic' : '#add8e6', # 173, 216, 230
'aliceblue' : '#f0f8ff', # 240, 248, 255
'antiquewhite' : '#faebd7', # 250, 235, 215
'aqua' : '#00ffff', # 0, 255, 255
'aquamarine' : '#7fffd4', # 127, 255, 212
'azure' : '#f0ffff', # 240, 255, 255
'beige' : '#f5f5dc', # 245, 245, 220
'bisque' : '#ffe4c4', # 255, 228, 196
'black' : '#000000', # 0, 0, 0
'blanchedalmond' : '#ffebcd', # 255, 235, 205
'blue' : '#0000ff', # 0, 0, 255
'blueviolet' : '#8a2be2', # 138, 43, 226
'brown' : '#a52a2a', # 165, 42, 42
'burlywood' : '#deb887', # 222, 184, 135
'cadetblue' : '#5f9ea0', # 95, 158, 160
'chartreuse' : '#7fff00', # 127, 255, 0
'chocolate' : '#d2691e', # 210, 105, 30
'coral' : '#ff7f50', # 255, 127, 80
'cornflowerblue' : '#6495ed', # 100, 149, 237
'cornsilk' : '#fff8dc', # 255, 248, 220
'crimson' : '#dc143c', # 220, 20, 60
'cyan' : '#00ffff', # 0, 255, 255
'darkblue' : '#00008b', # 0, 0, 139
'darkcyan' : '#008b8b', # 0, 139, 139
'darkgoldenrod' : '#b8860b', # 184, 134, 11
'darkgray' : '#a9a9a9', # 169, 169, 169
'darkgreen' : '#006400', # 0, 100, 0
'darkgrey' : '#a9a9a9', # 169, 169, 169
'darkkhaki' : '#bdb76b', # 189, 183, 107
'darkmagenta' : '#8b008b', # 139, 0, 139
'darkolivegreen' : '#556b2f', # 85, 107, 47
'darkorange' : '#ff8c00', # 255, 140, 0
'darkorchid' : '#9932cc', # 153, 50, 204
'darkred' : '#8b0000', # 139, 0, 0
'darksalmon' : '#e9967a', # 233, 150, 122
'darkseagreen' : '#8fbc8f', # 143, 188, 143
'darkslateblue' : '#483d8b', # 72, 61, 139
'darkslategray' : '#2f4f4f', # 47, 79, 79
'darkslategrey' : '#2f4f4f', # 47, 79, 79
'darkturquoise' : '#00ced1', # 0, 206, 209
'darkviolet' : '#9400d3', # 148, 0, 211
'deeppink' : '#ff1493', # 255, 20, 147
'deepskyblue' : '#00bfff', # 0, 191, 255
'dimgray' : '#696969', # 105, 105, 105
'dimgrey' : '#696969', # 105, 105, 105
'dodgerblue' : '#1e90ff', # 30, 144, 255
'firebrick' : '#b22222', # 178, 34, 34
'floralwhite' : '#fffaf0', # 255, 250, 240
'forestgreen' : '#228b22', # 34, 139, 34
'fuchsia' : '#ff00ff', # 255, 0, 255
'gainsboro' : '#dcdcdc', # 220, 220, 220
'ghostwhite' : '#f8f8ff', # 248, 248, 255
'gold' : '#ffd700', # 255, 215, 0
'goldenrod' : '#daa520', # 218, 165, 32
'gray' : '#808080', # 128, 128, 128
'green' : '#008000', # 0, 128, 0
'greenyellow' : '#adff2f', # 173, 255, 47
'grey' : '#808080', # 128, 128, 128
'honeydew' : '#f0fff0', # 240, 255, 240
'hotpink' : '#ff69b4', # 255, 105, 180
'indianred' : '#cd5c5c', # 205, 92, 92
'indigo' : '#4b0082', # 75, 0, 130
'ivory' : '#fffff0', # 255, 255, 240
'khaki' : '#f0e68c', # 240, 230, 140
'lavender' : '#e6e6fa', # 230, 230, 250
'lavenderblush' : '#fff0f5', # 255, 240, 245
'lawngreen' : '#7cfc00', # 124, 252, 0
'lemonchiffon' : '#fffacd', # 255, 250, 205
'lightblue' : '#add8e6', # 173, 216, 230
'lightcoral' : '#f08080', # 240, 128, 128
'lightcyan' : '#e0ffff', # 224, 255, 255
'lightgoldenrodyellow' : '#fafad2', # 250, 250, 210
'lightgray' : '#d3d3d3', # 211, 211, 211
'lightgreen' : '#90ee90', # 144, 238, 144
'lightgrey' : '#d3d3d3', # 211, 211, 211
'lightpink' : '#ffb6c1', # 255, 182, 193
'lightsalmon' : '#ffa07a', # 255, 160, 122
'lightseagreen' : '#20b2aa', # 32, 178, 170
'lightskyblue' : '#87cefa', # 135, 206, 250
'lightslategray' : '#778899', # 119, 136, 153
'lightslategrey' : '#778899', # 119, 136, 153
'lightsteelblue' : '#b0c4de', # 176, 196, 222
'lightyellow' : '#ffffe0', # 255, 255, 224
'lime' : '#00ff00', # 0, 255, 0
'limegreen' : '#32cd32', # 50, 205, 50
'linen' : '#faf0e6', # 250, 240, 230
'magenta' : '#ff00ff', # 255, 0, 255
'maroon' : '#800000', # 128, 0, 0
'mediumaquamarine' : '#66cdaa', # 102, 205, 170
'mediumblue' : '#0000cd', # 0, 0, 205
'mediumorchid' : '#ba55d3', # 186, 85, 211
'mediumpurple' : '#9370db', # 147, 112, 219
'mediumseagreen' : '#3cb371', # 60, 179, 113
'mediumslateblue' : '#7b68ee', # 123, 104, 238
'mediumspringgreen' : '#00fa9a', # 0, 250, 154
'mediumturquoise' : '#48d1cc', # 72, 209, 204
'mediumvioletred' : '#c71585', # 199, 21, 133
'midnightblue' : '#191970', # 25, 25, 112
'mintcream' : '#f5fffa', # 245, 255, 250
'mistyrose' : '#ffe4e1', # 255, 228, 225
'moccasin' : '#ffe4b5', # 255, 228, 181
'navajowhite' : '#ffdead', # 255, 222, 173
'navy' : '#000080', # 0, 0, 128
'oldlace' : '#fdf5e6', # 253, 245, 230
'olive' : '#808000', # 128, 128, 0
'olivedrab' : '#6b8e23', # 107, 142, 35
'orange' : '#ffa500', # 255, 165, 0
'orangered' : '#ff4500', # 255, 69, 0
'orchid' : '#da70d6', # 218, 112, 214
'palegoldenrod' : '#eee8aa', # 238, 232, 170
'palegreen' : '#98fb98', # 152, 251, 152
'paleturquoise' : '#afeeee', # 175, 238, 238
'palevioletred' : '#db7093', # 219, 112, 147
'papayawhip' : '#ffefd5', # 255, 239, 213
'peachpuff' : '#ffdab9', # 255, 218, 185
'peru' : '#cd853f', # 205, 133, 63
'pink' : '#ffc0cb', # 255, 192, 203
'plum' : '#dda0dd', # 221, 160, 221
'powderblue' : '#b0e0e6', # 176, 224, 230
'purple' : '#800080', # 128, 0, 128
'red' : '#ff0000', # 255, 0, 0
'rosybrown' : '#bc8f8f', # 188, 143, 143
'royalblue' : '#4169e1', # 65, 105, 225
'saddlebrown' : '#8b4513', # 139, 69, 19
'salmon' : '#fa8072', # 250, 128, 114
'sandybrown' : '#f4a460', # 244, 164, 96
'seagreen' : '#2e8b57', # 46, 139, 87
'seashell' : '#fff5ee', # 255, 245, 238
'sienna' : '#a0522d', # 160, 82, 45
'silver' : '#c0c0c0', # 192, 192, 192
'skyblue' : '#87ceeb', # 135, 206, 235
'slateblue' : '#6a5acd', # 106, 90, 205
'slategray' : '#708090', # 112, 128, 144
'slategrey' : '#708090', # 112, 128, 144
'snow' : '#fffafa', # 255, 250, 250
'springgreen' : '#00ff7f', # 0, 255, 127
'steelblue' : '#4682b4', # 70, 130, 180
'tan' : '#d2b48c', # 210, 180, 140
'teal' : '#008080', # 0, 128, 128
'thistle' : '#d8bfd8', # 216, 191, 216
'tomato' : '#ff6347', # 255, 99, 71
'turquoise' : '#40e0d0', # 64, 224, 208
'violet' : '#ee82ee', # 238, 130, 238
'wheat' : '#f5deb3', # 245, 222, 179
'white' : '#ffffff', # 255, 255, 255
'whitesmoke' : '#f5f5f5', # 245, 245, 245
'yellow' : '#ffff00', # 255, 255, 0
'yellowgreen' : '#9acd32' # 154, 205, 50
}
def plot_clusters_pca(responsibilities, color_groups):
from sklearn.decomposition import RandomizedPCA
import pylab as pl
from random import shuffle
colors = list(colors_dict.values())
shuffle(colors)
pca = RandomizedPCA(n_components=2)
X = pca.fit_transform(responsibilities)
# print >>stderr, pca.explained_variance_ratio_
pl.figure()
pl.scatter(X[:, 0], X[:, 1], c="grey", label="unknown")
for c, sub, i in zip(colors, color_groups, count(0)):
pl.scatter(X[sub, 0], X[sub, 1], c=c, label=str(i))
pl.legend()
pl.title("PCA responsibility matrix")
pl.show()
hellinger_distance = lambda u, v: -np.log(1.001-np.sqrt(((np.sqrt(u) - np.sqrt(v))**2.).sum())/np.sqrt(2.))
my_distance = lambda u, v: -np.log(1.0001 - np.dot(u, v))
dummy_distance = lambda u, v: 0.
def plot_clusters_igraph(responsibilities, color_groups):
from scipy.spatial.distance import pdist, correlation, squareform
from igraph import Graph, plot
data = responsibilities[:, :2]
Y = pdist(data, hellinger_distance)
print(Y[:30], file=stderr)
# return
g = Graph()
n = data.shape[0]
g.add_vertices(n)
colors = ["grey"]*n
palette = list(colors_dict.values())
for j, group in enumerate(color_groups):
c = palette[j]
for i in group:
colors[i] = c
l = g.layout_mds(dist=squareform(Y))
plot(g, layout=l, vertex_color=colors, bbox=(1024, 1024), vertex_size=5)
# c&p from stackexchange
def uniq(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def print_probmatrix(mat, file=stdout):
for row in np.asarray(mat):
file.write("\t".join(["%.2f" % i for i in row]))
file.write("\n")
pretty_probvector = lambda vec: "|".join(("%.2f" % f for f in vec))
pretty_probmatrix = lambda mat: "\n".join((pretty_probvector(row) for row in mat))
def print_probvector(vec, file=stdout):
file.write("|".join(("%.2f" % f for f in vec)))
file.write("\n")
def print_vector(vec, file=stdout):
file.write("|".join(("%s" % i for i in vec)))
file.write("\n")
def newline(file=stdout):
file.write("\n")
print_predictions = lambda mat: print_probmatrix(np.absolute(np.log(mat))) # TODO: add proper file sink
# debug function
def factorial_array(vec):
return np.asarray([np.math.factorial(i) for i in vec]) # superslow?
# debug function
def log_array(vec):
return np.asarray([math.log(i) for i in vec], dtype=float) # why not numpy.log?
binom_array = binom
class InternalTreeIndex:
def __init__(self):
self._store = defaultdict(self._context())
def __getitem__(self, itemseq):
current = self._store
for item in itemseq:
index, current = current[item]
yield index
def _context(self):
obj = self._convert_generator_functor(count())
return lambda: self._default_value(obj)
def _default_value(self, obj):
return obj(), defaultdict(self._context())
def items(self): # iterate breadth-first
stack = deque([(tuple(), tuple(), self._store)])
while stack:
prefix_ext, prefix_int, store = stack.popleft()
for node_ext, value in store.items():
node_int, store_next = value
path_ext = prefix_ext + (node_ext,)
path_int = prefix_int + (node_int,)
if store_next:
stack.append((path_ext, path_int, store_next))
yield path_ext, path_int
def keys(self): # iterate breadth-first
stack = deque([(tuple(), self._store)])
while stack:
prefix_ext, store = stack.popleft()
for node_ext, value in store.items():
store_next = value[1]
path_ext = prefix_ext + (node_ext,)
if store_next:
stack.append((path_ext, store_next))
yield path_ext
def values(self): # iterate breadth-first
stack = deque([(tuple(), self._store)])
while stack:
prefix_int, store = stack.popleft()[1:]
for node_int, store_next in store.values():
path_int = prefix_int + (node_int,)
if store_next:
stack.append((path_int, store_next))
yield path_int
_convert_generator_functor = staticmethod(lambda gen: lambda: next(gen))
class NestedCountIndex: # TODO: implement using NestedDict
def __init__(self):
self._store = self._fn()
self._size = 0
def _fn(self):
return defaultdict(self._fn)
def __getitem__(self, itemseq):
current = self._store
for item in itemseq:
current = current[item]
ret = current.get(self._defaultkey)
if ret is None:
ret = self._size
current[self._defaultkey] = ret
self._size += 1
return ret
def __len__(self):
return self._size
def keys(self): # iterate breadth-first
for path, val in self.items():
yield path
def _values_partial(self, queue): # iterate breadth-first
new = queue.popleft()
while new is not None: # invariant: level end
store = new
for node, val in store.items():
if node is self._defaultkey:
yield val
elif val:
queue.append(val)
new = queue.popleft()
        return  # end of this level's values (PEP 479: don't raise StopIteration inside a generator)
def values_nested(self):
queue = deque([self._store])
while queue:
queue.append(None) # invariant: level end
yield self._values_partial(queue)
def values(self): # iterate breadth-first
return chain.from_iterable(self.values_nested())
def _items_partial(self, queue): # iterate breadth-first
new = queue.popleft()
while new is not None: # invariant: level end
prefix, store = new
for node, val in store.items():
if node is self._defaultkey:
yield prefix, val
elif val:
queue.append((prefix + (node,), val))
new = queue.popleft()
        return  # end of this level's items (PEP 479: don't raise StopIteration inside a generator)
def items_nested(self):
queue = deque([(tuple(), self._store)])
while queue:
queue.append(None) # invariant: level end
yield self._items_partial(queue)
def items(self): # iterate breadth-first
return chain.from_iterable(self.items_nested())
_defaultkey = None
class NestedDict:
def __init__(self):
fn = lambda: defaultdict(fn)
self._store = fn()
def __getitem__(self, itemseq):
current = self._store
for item in itemseq:
current = current[item]
ret = current.get(self._defaultkey)
if ret is not None:
return ret
raise KeyError
def __setitem__(self, itemseq, value):
current = self._store
for item in itemseq:
current = current[item]
current[self._defaultkey] = value
def items(self): # iterate breadth-first
stack = deque([(tuple(), self._store)])
while stack:
prefix, store = stack.popleft()
for node, val in store.items():
if node is self._defaultkey:
yield prefix, val
elif val:
stack.append((prefix + (node,), val))
def keys(self): # iterate breadth-first
for path, val in self.items():
yield path
def values(self): # iterate breadth-first
stack = deque([self._store])
while stack:
store = stack.popleft()
for node, val in store.items():
if node is self._defaultkey:
yield val
elif val:
stack.append(val)
_defaultkey = None
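# Illustrative sketch (not part of the original module): NestedDict maps key
# sequences of arbitrary length to values, and NestedCountIndex hands out a
# running integer index per distinct key sequence.
def _example_nested_containers():
    d = NestedDict()
    d[("a", "b")] = 1
    d[("a", "c")] = 2
    assert d[("a", "b")] == 1
    idx = NestedCountIndex()
    assert idx[("x", "y")] == 0
    assert idx[("x", "z")] == 1
    assert idx[("x", "y")] == 0  # repeated lookups return the same index
    return d, idx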
class DefaultList(list):
"""A list class with default values designed for rare index misses (otherwise, don't use exceptions)"""
def __init__(self, fx):
self._fx = fx
def __setitem__(self, index, value):
try:
list.__setitem__(self, index, value)
except IndexError:
while len(self) < index:
self.append(self._fx())
self.append(value)
def __getitem__(self, index):
try:
            return list.__getitem__(self, index)
except IndexError:
while len(self) <= index:
self.append(self._fx())
return list.__getitem__(self, index)
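# Illustrative sketch (not part of the original module): DefaultList grows on
# demand, filling skipped indices with the value produced by the factory.
def _example_default_list():
    lst = DefaultList(int)  # missing entries default to int() == 0
    lst[3] = 7
    assert list(lst) == [0, 0, 0, 7]
    assert lst[5] == 0  # reading past the end also extends the list
    return lst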
def handle_broken_pipe():
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if __name__ == "__main__":
pass
| gpl-3.0 |
wesm/arrow | python/pyarrow/pandas_compat.py | 4 | 42243 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
from collections.abc import Sequence
from concurrent import futures
# import threading submodule upfront to avoid partially initialized
# module bug (ARROW-11983)
import concurrent.futures.thread # noqa
from copy import deepcopy
from itertools import zip_longest
import json
import operator
import re
import warnings
import numpy as np
import pyarrow as pa
from pyarrow.lib import _pandas_api, builtin_pickle, frombytes # noqa
_logical_type_map = {}
def get_logical_type_map():
global _logical_type_map
if not _logical_type_map:
_logical_type_map.update({
pa.lib.Type_NA: 'empty',
pa.lib.Type_BOOL: 'bool',
pa.lib.Type_INT8: 'int8',
pa.lib.Type_INT16: 'int16',
pa.lib.Type_INT32: 'int32',
pa.lib.Type_INT64: 'int64',
pa.lib.Type_UINT8: 'uint8',
pa.lib.Type_UINT16: 'uint16',
pa.lib.Type_UINT32: 'uint32',
pa.lib.Type_UINT64: 'uint64',
pa.lib.Type_HALF_FLOAT: 'float16',
pa.lib.Type_FLOAT: 'float32',
pa.lib.Type_DOUBLE: 'float64',
pa.lib.Type_DATE32: 'date',
pa.lib.Type_DATE64: 'date',
pa.lib.Type_TIME32: 'time',
pa.lib.Type_TIME64: 'time',
pa.lib.Type_BINARY: 'bytes',
pa.lib.Type_FIXED_SIZE_BINARY: 'bytes',
pa.lib.Type_STRING: 'unicode',
})
return _logical_type_map
def get_logical_type(arrow_type):
logical_type_map = get_logical_type_map()
try:
return logical_type_map[arrow_type.id]
except KeyError:
if isinstance(arrow_type, pa.lib.DictionaryType):
return 'categorical'
elif isinstance(arrow_type, pa.lib.ListType):
return 'list[{}]'.format(get_logical_type(arrow_type.value_type))
elif isinstance(arrow_type, pa.lib.TimestampType):
return 'datetimetz' if arrow_type.tz is not None else 'datetime'
elif isinstance(arrow_type, pa.lib.Decimal128Type):
return 'decimal'
return 'object'
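# Illustrative sketch (not part of the original module): a few expected mappings
# from Arrow types to the logical type names stored in the pandas metadata.
def _example_get_logical_type():
    assert get_logical_type(pa.int64()) == 'int64'
    assert get_logical_type(pa.string()) == 'unicode'
    assert get_logical_type(pa.timestamp('ns', tz='UTC')) == 'datetimetz'
    assert get_logical_type(pa.list_(pa.int32())) == 'list[int32]'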
_numpy_logical_type_map = {
np.bool_: 'bool',
np.int8: 'int8',
np.int16: 'int16',
np.int32: 'int32',
np.int64: 'int64',
np.uint8: 'uint8',
np.uint16: 'uint16',
np.uint32: 'uint32',
np.uint64: 'uint64',
np.float32: 'float32',
np.float64: 'float64',
'datetime64[D]': 'date',
np.unicode_: 'string',
np.bytes_: 'bytes',
}
def get_logical_type_from_numpy(pandas_collection):
try:
return _numpy_logical_type_map[pandas_collection.dtype.type]
except KeyError:
if hasattr(pandas_collection.dtype, 'tz'):
return 'datetimetz'
# See https://github.com/pandas-dev/pandas/issues/24739
if str(pandas_collection.dtype) == 'datetime64[ns]':
return 'datetime64[ns]'
result = _pandas_api.infer_dtype(pandas_collection)
if result == 'string':
return 'unicode'
return result
def get_extension_dtype_info(column):
dtype = column.dtype
if str(dtype) == 'category':
cats = getattr(column, 'cat', column)
assert cats is not None
metadata = {
'num_categories': len(cats.categories),
'ordered': cats.ordered,
}
physical_dtype = str(cats.codes.dtype)
elif hasattr(dtype, 'tz'):
metadata = {'timezone': pa.lib.tzinfo_to_string(dtype.tz)}
physical_dtype = 'datetime64[ns]'
else:
metadata = None
physical_dtype = str(dtype)
return physical_dtype, metadata
def get_column_metadata(column, name, arrow_type, field_name):
"""Construct the metadata for a given column
Parameters
----------
column : pandas.Series or pandas.Index
name : str
arrow_type : pyarrow.DataType
field_name : str
Equivalent to `name` when `column` is a `Series`, otherwise if `column`
is a pandas Index then `field_name` will not be the same as `name`.
This is the name of the field in the arrow Table's schema.
Returns
-------
dict
"""
logical_type = get_logical_type(arrow_type)
string_dtype, extra_metadata = get_extension_dtype_info(column)
if logical_type == 'decimal':
extra_metadata = {
'precision': arrow_type.precision,
'scale': arrow_type.scale,
}
string_dtype = 'object'
if name is not None and not isinstance(name, str):
raise TypeError(
'Column name must be a string. Got column {} of type {}'.format(
name, type(name).__name__
)
)
assert field_name is None or isinstance(field_name, str), \
str(type(field_name))
return {
'name': name,
'field_name': 'None' if field_name is None else field_name,
'pandas_type': logical_type,
'numpy_type': string_dtype,
'metadata': extra_metadata,
}
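# Illustrative sketch (not part of the original module, assumes pandas is
# importable): the per-column metadata produced above for a plain int64 Series.
def _example_get_column_metadata():
    import pandas as pd
    col = pd.Series([1, 2, 3], name='x')
    meta = get_column_metadata(col, name='x', arrow_type=pa.int64(), field_name='x')
    assert meta['pandas_type'] == 'int64' and meta['numpy_type'] == 'int64'
    return meta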
def construct_metadata(columns_to_convert, df, column_names, index_levels,
index_descriptors, preserve_index, types):
"""Returns a dictionary containing enough metadata to reconstruct a pandas
DataFrame as an Arrow Table, including index columns.
Parameters
----------
columns_to_convert : list[pd.Series]
df : pandas.DataFrame
index_levels : List[pd.Index]
index_descriptors : List[Dict]
preserve_index : bool
types : List[pyarrow.DataType]
Returns
-------
dict
"""
num_serialized_index_levels = len([descr for descr in index_descriptors
if not isinstance(descr, dict)])
# Use ntypes instead of Python shorthand notation [:-len(x)] as [:-0]
# behaves differently to what we want.
ntypes = len(types)
df_types = types[:ntypes - num_serialized_index_levels]
index_types = types[ntypes - num_serialized_index_levels:]
column_metadata = []
for col, sanitized_name, arrow_type in zip(columns_to_convert,
column_names, df_types):
metadata = get_column_metadata(col, name=sanitized_name,
arrow_type=arrow_type,
field_name=sanitized_name)
column_metadata.append(metadata)
index_column_metadata = []
if preserve_index is not False:
for level, arrow_type, descriptor in zip(index_levels, index_types,
index_descriptors):
if isinstance(descriptor, dict):
# The index is represented in a non-serialized fashion,
# e.g. RangeIndex
continue
metadata = get_column_metadata(level, name=level.name,
arrow_type=arrow_type,
field_name=descriptor)
index_column_metadata.append(metadata)
column_indexes = []
levels = getattr(df.columns, 'levels', [df.columns])
names = getattr(df.columns, 'names', [df.columns.name])
for level, name in zip(levels, names):
metadata = _get_simple_index_descriptor(level, name)
column_indexes.append(metadata)
else:
index_descriptors = index_column_metadata = column_indexes = []
return {
b'pandas': json.dumps({
'index_columns': index_descriptors,
'column_indexes': column_indexes,
'columns': column_metadata + index_column_metadata,
'creator': {
'library': 'pyarrow',
'version': pa.__version__
},
'pandas_version': _pandas_api.version
}).encode('utf8')
}
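# Illustrative sketch (not part of the original module, assumes pandas is
# importable): the metadata built above is stored under the b'pandas' key of the
# Table schema and can be decoded with json.loads.
def _example_read_pandas_metadata():
    import pandas as pd
    table = pa.Table.from_pandas(pd.DataFrame({'x': [1, 2, 3]}))
    meta = json.loads(table.schema.metadata[b'pandas'].decode('utf8'))
    return meta['index_columns'], [c['name'] for c in meta['columns']]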
def _get_simple_index_descriptor(level, name):
string_dtype, extra_metadata = get_extension_dtype_info(level)
pandas_type = get_logical_type_from_numpy(level)
if 'mixed' in pandas_type:
warnings.warn(
"The DataFrame has column names of mixed type. They will be "
"converted to strings and not roundtrip correctly.",
UserWarning, stacklevel=4)
if pandas_type == 'unicode':
assert not extra_metadata
extra_metadata = {'encoding': 'UTF-8'}
return {
'name': name,
'field_name': name,
'pandas_type': pandas_type,
'numpy_type': string_dtype,
'metadata': extra_metadata,
}
def _column_name_to_strings(name):
"""Convert a column name (or level) to either a string or a recursive
collection of strings.
Parameters
----------
name : str or tuple
Returns
-------
value : str or tuple
Examples
--------
>>> name = 'foo'
>>> _column_name_to_strings(name)
'foo'
>>> name = ('foo', 'bar')
>>> _column_name_to_strings(name)
('foo', 'bar')
>>> import pandas as pd
>>> name = (1, pd.Timestamp('2017-02-01 00:00:00'))
>>> _column_name_to_strings(name)
('1', '2017-02-01 00:00:00')
"""
if isinstance(name, str):
return name
elif isinstance(name, bytes):
# XXX: should we assume that bytes in Python 3 are UTF-8?
return name.decode('utf8')
elif isinstance(name, tuple):
return str(tuple(map(_column_name_to_strings, name)))
elif isinstance(name, Sequence):
raise TypeError("Unsupported type for MultiIndex level")
elif name is None:
return None
return str(name)
def _index_level_name(index, i, column_names):
"""Return the name of an index level or a default name if `index.name` is
None or is already a column name.
Parameters
----------
index : pandas.Index
i : int
Returns
-------
name : str
"""
if index.name is not None and index.name not in column_names:
return index.name
else:
return '__index_level_{:d}__'.format(i)
def _get_columns_to_convert(df, schema, preserve_index, columns):
columns = _resolve_columns_of_interest(df, schema, columns)
if not df.columns.is_unique:
raise ValueError(
'Duplicate column names found: {}'.format(list(df.columns))
)
if schema is not None:
return _get_columns_to_convert_given_schema(df, schema, preserve_index)
column_names = []
index_levels = (
_get_index_level_values(df.index) if preserve_index is not False
else []
)
columns_to_convert = []
convert_fields = []
for name in columns:
col = df[name]
name = _column_name_to_strings(name)
if _pandas_api.is_sparse(col):
raise TypeError(
"Sparse pandas data (column {}) not supported.".format(name))
columns_to_convert.append(col)
convert_fields.append(None)
column_names.append(name)
index_descriptors = []
index_column_names = []
for i, index_level in enumerate(index_levels):
name = _index_level_name(index_level, i, column_names)
if (isinstance(index_level, _pandas_api.pd.RangeIndex) and
preserve_index is None):
descr = _get_range_index_descriptor(index_level)
else:
columns_to_convert.append(index_level)
convert_fields.append(None)
descr = name
index_column_names.append(name)
index_descriptors.append(descr)
all_names = column_names + index_column_names
# all_names : all of the columns in the resulting table including the data
# columns and serialized index columns
# column_names : the names of the data columns
# index_column_names : the names of the serialized index columns
# index_descriptors : descriptions of each index to be used for
# reconstruction
# index_levels : the extracted index level values
# columns_to_convert : assembled raw data (both data columns and indexes)
# to be converted to Arrow format
# columns_fields : specified column to use for coercion / casting
# during serialization, if a Schema was provided
return (all_names, column_names, index_column_names, index_descriptors,
index_levels, columns_to_convert, convert_fields)
def _get_columns_to_convert_given_schema(df, schema, preserve_index):
"""
Specialized version of _get_columns_to_convert in case a Schema is
specified.
In that case, the Schema is used as the single point of truth for the
table structure (types, which columns are included, order of columns, ...).
"""
column_names = []
columns_to_convert = []
convert_fields = []
index_descriptors = []
index_column_names = []
index_levels = []
for name in schema.names:
try:
col = df[name]
is_index = False
except KeyError:
try:
col = _get_index_level(df, name)
except (KeyError, IndexError):
# name not found as index level
raise KeyError(
"name '{}' present in the specified schema is not found "
"in the columns or index".format(name))
if preserve_index is False:
raise ValueError(
"name '{}' present in the specified schema corresponds "
"to the index, but 'preserve_index=False' was "
"specified".format(name))
elif (preserve_index is None and
isinstance(col, _pandas_api.pd.RangeIndex)):
raise ValueError(
"name '{}' is present in the schema, but it is a "
"RangeIndex which will not be converted as a column "
"in the Table, but saved as metadata-only not in "
"columns. Specify 'preserve_index=True' to force it "
"being added as a column, or remove it from the "
"specified schema".format(name))
is_index = True
name = _column_name_to_strings(name)
if _pandas_api.is_sparse(col):
raise TypeError(
"Sparse pandas data (column {}) not supported.".format(name))
field = schema.field(name)
columns_to_convert.append(col)
convert_fields.append(field)
column_names.append(name)
if is_index:
index_column_names.append(name)
index_descriptors.append(name)
index_levels.append(col)
all_names = column_names + index_column_names
return (all_names, column_names, index_column_names, index_descriptors,
index_levels, columns_to_convert, convert_fields)
def _get_index_level(df, name):
"""
Get the index level of a DataFrame given 'name' (column name in an arrow
Schema).
"""
key = name
if name not in df.index.names and _is_generated_index_name(name):
# we know we have an autogenerated name => extract number and get
# the index level positionally
key = int(name[len("__index_level_"):-2])
return df.index.get_level_values(key)
def _level_name(name):
# preserve type when default serializable, otherwise str it
try:
json.dumps(name)
return name
except TypeError:
return str(name)
def _get_range_index_descriptor(level):
# public start/stop/step attributes added in pandas 0.25.0
return {
'kind': 'range',
'name': _level_name(level.name),
'start': _pandas_api.get_rangeindex_attribute(level, 'start'),
'stop': _pandas_api.get_rangeindex_attribute(level, 'stop'),
'step': _pandas_api.get_rangeindex_attribute(level, 'step')
}
def _get_index_level_values(index):
n = len(getattr(index, 'levels', [index]))
return [index.get_level_values(i) for i in range(n)]
def _resolve_columns_of_interest(df, schema, columns):
if schema is not None and columns is not None:
raise ValueError('Schema and columns arguments are mutually '
'exclusive, pass only one of them')
elif schema is not None:
columns = schema.names
elif columns is not None:
columns = [c for c in columns if c in df.columns]
else:
columns = df.columns
return columns
def dataframe_to_types(df, preserve_index, columns=None):
(all_names,
column_names,
_,
index_descriptors,
index_columns,
columns_to_convert,
_) = _get_columns_to_convert(df, None, preserve_index, columns)
types = []
# If pandas knows type, skip conversion
for c in columns_to_convert:
values = c.values
if _pandas_api.is_categorical(values):
type_ = pa.array(c, from_pandas=True).type
elif _pandas_api.is_extension_array_dtype(values):
type_ = pa.array(c.head(0), from_pandas=True).type
else:
values, type_ = get_datetimetz_type(values, c.dtype, None)
type_ = pa.lib._ndarray_to_arrow_type(values, type_)
if type_ is None:
type_ = pa.array(c, from_pandas=True).type
types.append(type_)
metadata = construct_metadata(
columns_to_convert, df, column_names, index_columns,
index_descriptors, preserve_index, types
)
return all_names, types, metadata
def dataframe_to_arrays(df, schema, preserve_index, nthreads=1, columns=None,
safe=True):
(all_names,
column_names,
index_column_names,
index_descriptors,
index_columns,
columns_to_convert,
convert_fields) = _get_columns_to_convert(df, schema, preserve_index,
columns)
# NOTE(wesm): If nthreads=None, then we use a heuristic to decide whether
# using a thread pool is worth it. Currently the heuristic is whether the
# nrows > 100 * ncols and ncols > 1.
if nthreads is None:
nrows, ncols = len(df), len(df.columns)
if nrows > ncols * 100 and ncols > 1:
nthreads = pa.cpu_count()
else:
nthreads = 1
def convert_column(col, field):
if field is None:
field_nullable = True
type_ = None
else:
field_nullable = field.nullable
type_ = field.type
try:
result = pa.array(col, type=type_, from_pandas=True, safe=safe)
except (pa.ArrowInvalid,
pa.ArrowNotImplementedError,
pa.ArrowTypeError) as e:
e.args += ("Conversion failed for column {!s} with type {!s}"
.format(col.name, col.dtype),)
raise e
if not field_nullable and result.null_count > 0:
raise ValueError("Field {} was non-nullable but pandas column "
"had {} null values".format(str(field),
result.null_count))
return result
def _can_definitely_zero_copy(arr):
return (isinstance(arr, np.ndarray) and
arr.flags.contiguous and
issubclass(arr.dtype.type, np.integer))
if nthreads == 1:
arrays = [convert_column(c, f)
for c, f in zip(columns_to_convert, convert_fields)]
else:
arrays = []
with futures.ThreadPoolExecutor(nthreads) as executor:
for c, f in zip(columns_to_convert, convert_fields):
if _can_definitely_zero_copy(c.values):
arrays.append(convert_column(c, f))
else:
arrays.append(executor.submit(convert_column, c, f))
for i, maybe_fut in enumerate(arrays):
if isinstance(maybe_fut, futures.Future):
arrays[i] = maybe_fut.result()
types = [x.type for x in arrays]
if schema is None:
fields = []
for name, type_ in zip(all_names, types):
name = name if name is not None else 'None'
fields.append(pa.field(name, type_))
schema = pa.schema(fields)
pandas_metadata = construct_metadata(
columns_to_convert, df, column_names, index_columns,
index_descriptors, preserve_index, types
)
metadata = deepcopy(schema.metadata) if schema.metadata else dict()
metadata.update(pandas_metadata)
schema = schema.with_metadata(metadata)
return arrays, schema
def get_datetimetz_type(values, dtype, type_):
if values.dtype.type != np.datetime64:
return values, type_
if _pandas_api.is_datetimetz(dtype) and type_ is None:
# If no user type passed, construct a tz-aware timestamp type
tz = dtype.tz
unit = dtype.unit
type_ = pa.timestamp(unit, tz)
elif type_ is None:
# Trust the NumPy dtype
type_ = pa.from_numpy_dtype(values.dtype)
return values, type_
# ----------------------------------------------------------------------
# Converting pandas.DataFrame to a dict containing only NumPy arrays or other
# objects friendly to pyarrow.serialize
def dataframe_to_serialized_dict(frame):
block_manager = frame._data
blocks = []
axes = [ax for ax in block_manager.axes]
for block in block_manager.blocks:
values = block.values
block_data = {}
if _pandas_api.is_datetimetz(values.dtype):
block_data['timezone'] = pa.lib.tzinfo_to_string(values.tz)
if hasattr(values, 'values'):
values = values.values
elif _pandas_api.is_categorical(values):
block_data.update(dictionary=values.categories,
ordered=values.ordered)
values = values.codes
block_data.update(
placement=block.mgr_locs.as_array,
block=values
)
# If we are dealing with an object array, pickle it instead.
if values.dtype == np.dtype(object):
block_data['object'] = None
block_data['block'] = builtin_pickle.dumps(
values, protocol=builtin_pickle.HIGHEST_PROTOCOL)
blocks.append(block_data)
return {
'blocks': blocks,
'axes': axes
}
def serialized_dict_to_dataframe(data):
import pandas.core.internals as _int
reconstructed_blocks = [_reconstruct_block(block)
for block in data['blocks']]
block_mgr = _int.BlockManager(reconstructed_blocks, data['axes'])
return _pandas_api.data_frame(block_mgr)
def _reconstruct_block(item, columns=None, extension_columns=None):
"""
Construct a pandas Block from the `item` dictionary coming from pyarrow's
serialization or returned by arrow::python::ConvertTableToPandas.
This function takes care of converting dictionary types to pandas
categorical, Timestamp-with-timezones to the proper pandas Block, and
conversion to pandas ExtensionBlock
Parameters
----------
item : dict
For basic types, this is a dictionary in the form of
{'block': np.ndarray of values, 'placement': pandas block placement}.
Additional keys are present for other types (dictionary, timezone,
object).
columns :
Column names of the table being constructed, used for extension types
extension_columns : dict
Dictionary of {column_name: pandas_dtype} that includes all columns
and corresponding dtypes that will be converted to a pandas
ExtensionBlock.
Returns
-------
pandas Block
"""
import pandas.core.internals as _int
block_arr = item.get('block', None)
placement = item['placement']
if 'dictionary' in item:
cat = _pandas_api.categorical_type.from_codes(
block_arr, categories=item['dictionary'],
ordered=item['ordered'])
block = _int.make_block(cat, placement=placement)
elif 'timezone' in item:
dtype = make_datetimetz(item['timezone'])
block = _int.make_block(block_arr, placement=placement,
klass=_int.DatetimeTZBlock,
dtype=dtype)
elif 'object' in item:
block = _int.make_block(builtin_pickle.loads(block_arr),
placement=placement)
elif 'py_array' in item:
# create ExtensionBlock
arr = item['py_array']
assert len(placement) == 1
name = columns[placement[0]]
pandas_dtype = extension_columns[name]
if not hasattr(pandas_dtype, '__from_arrow__'):
raise ValueError("This column does not support to be converted "
"to a pandas ExtensionArray")
pd_ext_arr = pandas_dtype.__from_arrow__(arr)
block = _int.make_block(pd_ext_arr, placement=placement)
else:
block = _int.make_block(block_arr, placement=placement)
return block
def make_datetimetz(tz):
tz = pa.lib.string_to_tzinfo(tz)
return _pandas_api.datetimetz_type('ns', tz=tz)
# ----------------------------------------------------------------------
# Converting pyarrow.Table efficiently to pandas.DataFrame
def table_to_blockmanager(options, table, categories=None,
ignore_metadata=False, types_mapper=None):
from pandas.core.internals import BlockManager
all_columns = []
column_indexes = []
pandas_metadata = table.schema.pandas_metadata
if not ignore_metadata and pandas_metadata is not None:
all_columns = pandas_metadata['columns']
column_indexes = pandas_metadata.get('column_indexes', [])
index_descriptors = pandas_metadata['index_columns']
table = _add_any_metadata(table, pandas_metadata)
table, index = _reconstruct_index(table, index_descriptors,
all_columns)
ext_columns_dtypes = _get_extension_dtypes(
table, all_columns, types_mapper)
else:
index = _pandas_api.pd.RangeIndex(table.num_rows)
ext_columns_dtypes = _get_extension_dtypes(table, [], types_mapper)
_check_data_column_metadata_consistency(all_columns)
columns = _deserialize_column_index(table, all_columns, column_indexes)
blocks = _table_to_blocks(options, table, categories, ext_columns_dtypes)
axes = [columns, index]
return BlockManager(blocks, axes)
# Set of the string repr of all numpy dtypes that can be stored in a pandas
# dataframe (complex not included since not supported by Arrow)
_pandas_supported_numpy_types = {
str(np.dtype(typ))
for typ in (np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float'] +
['object', 'bool'])
}
def _get_extension_dtypes(table, columns_metadata, types_mapper=None):
"""
Based on the stored column pandas metadata and the extension types
in the arrow schema, infer which columns should be converted to a
pandas extension dtype.
The 'numpy_type' field in the column metadata stores the string
representation of the original pandas dtype (and, despite its name,
not the 'pandas_type' field).
Based on this string representation, a pandas/numpy dtype is constructed
and then we can check if this dtype supports conversion from arrow.
"""
ext_columns = {}
# older pandas version that does not yet support extension dtypes
if _pandas_api.extension_dtype is None:
return ext_columns
# infer the extension columns from the pandas metadata
for col_meta in columns_metadata:
name = col_meta['name']
dtype = col_meta['numpy_type']
if dtype not in _pandas_supported_numpy_types:
# pandas_dtype is expensive, so avoid doing this for types
# that are certainly numpy dtypes
pandas_dtype = _pandas_api.pandas_dtype(dtype)
if isinstance(pandas_dtype, _pandas_api.extension_dtype):
if hasattr(pandas_dtype, "__from_arrow__"):
ext_columns[name] = pandas_dtype
# infer from extension type in the schema
for field in table.schema:
typ = field.type
if isinstance(typ, pa.BaseExtensionType):
try:
pandas_dtype = typ.to_pandas_dtype()
except NotImplementedError:
pass
else:
ext_columns[field.name] = pandas_dtype
# use the specified mapping of built-in arrow types to pandas dtypes
if types_mapper:
for field in table.schema:
typ = field.type
pandas_dtype = types_mapper(typ)
if pandas_dtype is not None:
ext_columns[field.name] = pandas_dtype
return ext_columns
def _check_data_column_metadata_consistency(all_columns):
# It can never be the case in a released version of pyarrow that
# c['name'] is None *and* 'field_name' is not a key in the column metadata,
# because the change to allow c['name'] to be None and the change to add
# 'field_name' are in the same release (0.8.0)
assert all(
(c['name'] is None and 'field_name' in c) or c['name'] is not None
for c in all_columns
)
def _deserialize_column_index(block_table, all_columns, column_indexes):
column_strings = [frombytes(x) if isinstance(x, bytes) else x
for x in block_table.column_names]
if all_columns:
columns_name_dict = {
c.get('field_name', _column_name_to_strings(c['name'])): c['name']
for c in all_columns
}
columns_values = [
columns_name_dict.get(name, name) for name in column_strings
]
else:
columns_values = column_strings
# If we're passed multiple column indexes then evaluate with
# ast.literal_eval, since the column index values show up as a list of
# tuples
to_pair = ast.literal_eval if len(column_indexes) > 1 else lambda x: (x,)
# Create the column index
# Construct the base index
if not columns_values:
columns = _pandas_api.pd.Index(columns_values)
else:
columns = _pandas_api.pd.MultiIndex.from_tuples(
list(map(to_pair, columns_values)),
names=[col_index['name'] for col_index in column_indexes] or None,
)
# if we're reconstructing the index
if len(column_indexes) > 0:
columns = _reconstruct_columns_from_metadata(columns, column_indexes)
# ARROW-1751: flatten a single level column MultiIndex for pandas 0.21.0
columns = _flatten_single_level_multiindex(columns)
return columns
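# Illustration (hypothetical column names): with two column indexes the Arrow column
# names are string reprs of tuples, so ast.literal_eval turns "('price', 2020)" into
# ('price', 2020) before MultiIndex.from_tuples runs; with a single column index each
# name is simply wrapped as a 1-tuple such as ('price',).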
def _reconstruct_index(table, index_descriptors, all_columns):
# 0. 'field_name' is the name of the column in the arrow Table
# 1. 'name' is the user-facing name of the column, that is, it came from
# pandas
# 2. 'field_name' and 'name' differ for index columns
# 3. We fall back on c['name'] for backwards compatibility
field_name_to_metadata = {
c.get('field_name', c['name']): c
for c in all_columns
}
# Build up a list of index columns and names while removing those columns
# from the original table
index_arrays = []
index_names = []
result_table = table
for descr in index_descriptors:
if isinstance(descr, str):
result_table, index_level, index_name = _extract_index_level(
table, result_table, descr, field_name_to_metadata)
if index_level is None:
# ARROW-1883: the serialized index column was not found
continue
elif descr['kind'] == 'range':
index_name = descr['name']
index_level = _pandas_api.pd.RangeIndex(descr['start'],
descr['stop'],
step=descr['step'],
name=index_name)
if len(index_level) != len(table):
# Possibly the result of munged metadata
continue
else:
raise ValueError("Unrecognized index kind: {}"
.format(descr['kind']))
index_arrays.append(index_level)
index_names.append(index_name)
pd = _pandas_api.pd
# Reconstruct the row index
if len(index_arrays) > 1:
index = pd.MultiIndex.from_arrays(index_arrays, names=index_names)
elif len(index_arrays) == 1:
index = index_arrays[0]
if not isinstance(index, pd.Index):
# Box anything that wasn't boxed above
index = pd.Index(index, name=index_names[0])
else:
index = pd.RangeIndex(table.num_rows)
return result_table, index
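# The descriptors handled above come in two shapes (values illustrative):
#   '__index_level_0__'                                  # a serialized index column
#   {'kind': 'range', 'name': None,
#    'start': 0, 'stop': 5, 'step': 1}                   # RangeIndex metadata only
# The first kind is extracted from the table itself; the second is rebuilt purely
# from metadata and never occupies a table column.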
def _extract_index_level(table, result_table, field_name,
field_name_to_metadata):
logical_name = field_name_to_metadata[field_name]['name']
index_name = _backwards_compatible_index_name(field_name, logical_name)
i = table.schema.get_field_index(field_name)
if i == -1:
# The serialized index column was removed by the user
return result_table, None, None
pd = _pandas_api.pd
col = table.column(i)
values = col.to_pandas().values
if hasattr(values, 'flags') and not values.flags.writeable:
# ARROW-1054: in pandas 0.19.2, factorize will reject
# non-writeable arrays when calling MultiIndex.from_arrays
values = values.copy()
if isinstance(col.type, pa.lib.TimestampType) and col.type.tz is not None:
index_level = make_tz_aware(pd.Series(values), col.type.tz)
else:
index_level = pd.Series(values, dtype=values.dtype)
result_table = result_table.remove_column(
result_table.schema.get_field_index(field_name)
)
return result_table, index_level, index_name
def _backwards_compatible_index_name(raw_name, logical_name):
"""Compute the name of an index column that is compatible with older
versions of :mod:`pyarrow`.
Parameters
----------
raw_name : str
logical_name : str
Returns
-------
result : str
Notes
-----
* Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager`
"""
# Part of table_to_blockmanager
if raw_name == logical_name and _is_generated_index_name(raw_name):
return None
else:
return logical_name
def _is_generated_index_name(name):
pattern = r'^__index_level_\d+__$'
return re.match(pattern, name) is not None
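# Quick sketch of the behaviour (illustrative inputs):
#   _backwards_compatible_index_name('__index_level_0__', '__index_level_0__')  # -> None
#   _backwards_compatible_index_name('__index_level_0__', 'date')               # -> 'date'
#   _backwards_compatible_index_name('date', 'date')                            # -> 'date'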
_pandas_logical_type_map = {
'date': 'datetime64[D]',
'datetime': 'datetime64[ns]',
'unicode': np.unicode_,
'bytes': np.bytes_,
'string': np.str_,
'integer': np.int64,
'floating': np.float64,
'empty': np.object_,
}
def _pandas_type_to_numpy_type(pandas_type):
"""Get the numpy dtype that corresponds to a pandas type.
Parameters
----------
pandas_type : str
The result of a call to pandas.lib.infer_dtype.
Returns
-------
dtype : np.dtype
The dtype that corresponds to `pandas_type`.
"""
try:
return _pandas_logical_type_map[pandas_type]
except KeyError:
if 'mixed' in pandas_type:
# catching 'mixed', 'mixed-integer' and 'mixed-integer-float'
return np.object_
return np.dtype(pandas_type)
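# For instance (illustrative): 'floating' -> np.float64, 'unicode' -> np.unicode_,
# 'mixed-integer' -> np.object_ (via the 'mixed' fallback), while an unmapped label
# such as 'int32' falls through to np.dtype('int32').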
def _get_multiindex_codes(mi):
# compat for pandas < 0.24 (MI labels renamed to codes).
if isinstance(mi, _pandas_api.pd.MultiIndex):
return mi.codes if hasattr(mi, 'codes') else mi.labels
else:
return None
def _reconstruct_columns_from_metadata(columns, column_indexes):
"""Construct a pandas MultiIndex from `columns` and column index metadata
in `column_indexes`.
Parameters
----------
columns : List[pd.Index]
The columns coming from a pyarrow.Table
column_indexes : List[Dict[str, str]]
The column index metadata deserialized from the JSON schema metadata
in a :class:`~pyarrow.Table`.
Returns
-------
result : MultiIndex
The index reconstructed using `column_indexes` metadata with levels of
the correct type.
Notes
-----
* Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager`
"""
pd = _pandas_api.pd
# Get levels and labels, and provide sane defaults if the index has a
# single level to avoid if/else spaghetti.
levels = getattr(columns, 'levels', None) or [columns]
labels = _get_multiindex_codes(columns) or [
pd.RangeIndex(len(level)) for level in levels
]
# Convert each level to the dtype provided in the metadata
levels_dtypes = [
(level, col_index.get('pandas_type', str(level.dtype)),
col_index.get('numpy_type', None))
for level, col_index in zip_longest(
levels, column_indexes, fillvalue={}
)
]
new_levels = []
encoder = operator.methodcaller('encode', 'UTF-8')
for level, pandas_dtype, numpy_dtype in levels_dtypes:
dtype = _pandas_type_to_numpy_type(pandas_dtype)
# Since our metadata is UTF-8 encoded, Python turns things that were
# bytes into unicode strings when json.loads-ing them. We need to
# convert them back to bytes to preserve metadata.
if dtype == np.bytes_:
level = level.map(encoder)
elif level.dtype != dtype:
level = level.astype(dtype)
# ARROW-9096: if original DataFrame was upcast we keep that
if level.dtype != numpy_dtype:
level = level.astype(numpy_dtype)
new_levels.append(level)
return pd.MultiIndex(new_levels, labels, names=columns.names)
def _table_to_blocks(options, block_table, categories, extension_columns):
# Part of table_to_blockmanager
# Convert an arrow table to Block from the internal pandas API
columns = block_table.column_names
result = pa.lib.table_to_blocks(options, block_table, categories,
list(extension_columns.keys()))
return [_reconstruct_block(item, columns, extension_columns)
for item in result]
def _flatten_single_level_multiindex(index):
pd = _pandas_api.pd
if isinstance(index, pd.MultiIndex) and index.nlevels == 1:
levels, = index.levels
labels, = _get_multiindex_codes(index)
# ARROW-9096: use levels.dtype to match cast with original DataFrame
dtype = levels.dtype
# Cheaply check that we do not somehow have duplicate column names
if not index.is_unique:
raise ValueError('Found non-unique column index')
return pd.Index(
[levels[_label] if _label != -1 else None for _label in labels],
dtype=dtype,
name=index.names[0]
)
return index
def _add_any_metadata(table, pandas_metadata):
modified_columns = {}
modified_fields = {}
schema = table.schema
index_columns = pandas_metadata['index_columns']
# only take index columns into account if they are an actual table column
index_columns = [idx_col for idx_col in index_columns
if isinstance(idx_col, str)]
n_index_levels = len(index_columns)
n_columns = len(pandas_metadata['columns']) - n_index_levels
# Add time zones
for i, col_meta in enumerate(pandas_metadata['columns']):
raw_name = col_meta.get('field_name')
if not raw_name:
# deal with metadata written with arrow < 0.8 or fastparquet
raw_name = col_meta['name']
if i >= n_columns:
# index columns
raw_name = index_columns[i - n_columns]
if raw_name is None:
raw_name = 'None'
idx = schema.get_field_index(raw_name)
if idx != -1:
if col_meta['pandas_type'] == 'datetimetz':
col = table[idx]
if not isinstance(col.type, pa.lib.TimestampType):
continue
metadata = col_meta['metadata']
if not metadata:
continue
metadata_tz = metadata.get('timezone')
if metadata_tz and metadata_tz != col.type.tz:
converted = col.to_pandas()
tz_aware_type = pa.timestamp('ns', tz=metadata_tz)
with_metadata = pa.Array.from_pandas(converted,
type=tz_aware_type)
modified_fields[idx] = pa.field(schema[idx].name,
tz_aware_type)
modified_columns[idx] = with_metadata
if len(modified_columns) > 0:
columns = []
fields = []
for i in range(len(table.schema)):
if i in modified_columns:
columns.append(modified_columns[i])
fields.append(modified_fields[i])
else:
columns.append(table[i])
fields.append(table.schema[i])
return pa.Table.from_arrays(columns, schema=pa.schema(fields))
else:
return table
# ----------------------------------------------------------------------
# Helper functions used in lib
def make_tz_aware(series, tz):
"""
Make a datetime64 Series timezone-aware for the given tz
"""
tz = pa.lib.string_to_tzinfo(tz)
series = (series.dt.tz_localize('utc')
.dt.tz_convert(tz))
return series
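# Minimal usage sketch (assumes pandas is available and `pd` refers to it):
#   naive = pd.Series(pd.date_range('2021-01-01', periods=3, freq='H'))
#   aware = make_tz_aware(naive, 'Europe/Zurich')   # localized to UTC, then converted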
| apache-2.0 |
pyfa-org/Pyfa | graphs/gui/canvasPanel.py | 2 | 14794 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import itertools
import math
import os
import traceback
from bisect import bisect
# noinspection PyPackageRequirements
import wx
from logbook import Logger
from graphs.style import BASE_COLORS, LIGHTNESSES, STYLES, hsl_to_hsv
from gui.utils.numberFormatter import roundToPrec
pyfalog = Logger(__name__)
try:
import matplotlib as mpl
mpl_version = int(mpl.__version__[0]) or -1
if mpl_version >= 2:
mpl.use('wxagg')
graphFrame_enabled = True
else:
graphFrame_enabled = False
from matplotlib.lines import Line2D
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.figure import Figure
from matplotlib.colors import hsv_to_rgb
except ImportError as e:
pyfalog.warning('Matplotlib failed to import. Likely missing or incompatible version.')
graphFrame_enabled = False
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
# We can get exceptions deep within matplotlib. Catch those. See GH #1046
tb = traceback.format_exc()
pyfalog.critical('Exception when importing Matplotlib. Continuing without importing.')
pyfalog.critical(tb)
graphFrame_enabled = False
class GraphCanvasPanel(wx.Panel):
def __init__(self, graphFrame, parent):
super().__init__(parent)
self.graphFrame = graphFrame
# Remove matplotlib font cache, see #234
try:
cache_dir = mpl._get_cachedir()
except (KeyboardInterrupt, SystemExit):
raise
except:
cache_dir = os.path.expanduser(os.path.join('~', '.matplotlib'))
cache_file = os.path.join(cache_dir, 'fontList.cache')
if os.access(cache_dir, os.W_OK | os.X_OK) and os.path.isfile(cache_file):
os.remove(cache_file)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.figure = Figure(figsize=(5, 3), tight_layout={'pad': 1.08})
rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()
clr = [c / 255. for c in rgbtuple]
self.figure.set_facecolor(clr)
self.figure.set_edgecolor(clr)
self.canvas = Canvas(self, -1, self.figure)
self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))
self.canvas.mpl_connect('button_press_event', self.OnMplCanvasClick)
self.subplot = self.figure.add_subplot(111)
self.subplot.grid(True)
mainSizer.Add(self.canvas, 1, wx.EXPAND | wx.ALL, 0)
self.SetSizer(mainSizer)
self.xMark = None
self.mplOnDragHandler = None
self.mplOnReleaseHandler = None
def draw(self, accurateMarks=True):
self.subplot.clear()
self.subplot.grid(True)
allXs = set()
allYs = set()
plotData = {}
legendData = []
chosenX = self.graphFrame.ctrlPanel.xType
chosenY = self.graphFrame.ctrlPanel.yType
self.subplot.set(
xlabel=self.graphFrame.ctrlPanel.formatLabel(chosenX),
ylabel=self.graphFrame.ctrlPanel.formatLabel(chosenY))
mainInput, miscInputs = self.graphFrame.ctrlPanel.getValues()
view = self.graphFrame.getView()
sources = self.graphFrame.ctrlPanel.sources
if view.hasTargets:
iterList = tuple(itertools.product(sources, self.graphFrame.ctrlPanel.targets))
else:
iterList = tuple((f, None) for f in sources)
# Draw plot lines and get data for legend
for source, target in iterList:
# Get line style data
try:
colorData = BASE_COLORS[source.colorID]
except KeyError:
pyfalog.warning('Invalid color "{}" for "{}"'.format(source.colorID, source.name))
continue
color = colorData.hsl
lineStyle = 'solid'
if target is not None:
try:
lightnessData = LIGHTNESSES[target.lightnessID]
except KeyError:
pyfalog.warning('Invalid lightness "{}" for "{}"'.format(target.lightnessID, target.name))
continue
color = lightnessData.func(color)
try:
lineStyleData = STYLES[target.lineStyleID]
except KeyError:
pyfalog.warning('Invalid line style "{}" for "{}"'.format(target.lineStyleID, target.name))
continue
lineStyle = lineStyleData.mplSpec
color = hsv_to_rgb(hsl_to_hsv(color))
# Get point data
try:
xs, ys = view.getPlotPoints(
mainInput=mainInput,
miscInputs=miscInputs,
xSpec=chosenX,
ySpec=chosenY,
src=source,
tgt=target)
if not self.__checkNumbers(xs, ys):
pyfalog.warning('Failed to plot "{}" vs "{}" due to inf or NaN in values'.format(source.name, '' if target is None else target.name))
continue
plotData[(source, target)] = (xs, ys)
allXs.update(xs)
allYs.update(ys)
# If we have single data point, show marker - otherwise line won't be shown
if len(xs) == 1 and len(ys) == 1:
self.subplot.plot(xs, ys, color=color, linestyle=lineStyle, marker='.')
else:
self.subplot.plot(xs, ys, color=color, linestyle=lineStyle)
# Fill data for legend
if target is None:
legendData.append((color, lineStyle, source.shortName))
else:
legendData.append((color, lineStyle, '{} vs {}'.format(source.shortName, target.shortName)))
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pyfalog.warning('Failed to plot "{}" vs "{}"'.format(source.name, '' if target is None else target.name))
self.canvas.draw()
self.Refresh()
return
# Setting Y limits for canvas
if self.graphFrame.ctrlPanel.showY0:
allYs.add(0)
canvasMinY, canvasMaxY = self._getLimits(allYs, minExtra=0.05, maxExtra=0.1)
canvasMinX, canvasMaxX = self._getLimits(allXs, minExtra=0.02, maxExtra=0.02)
self.subplot.set_ylim(bottom=canvasMinY, top=canvasMaxY)
self.subplot.set_xlim(left=canvasMinX, right=canvasMaxX)
# Process X marks line
if self.xMark is not None:
minX = min(allXs, default=None)
maxX = max(allXs, default=None)
if minX is not None and maxX is not None:
minY = min(allYs, default=None)
maxY = max(allYs, default=None)
yDiff = (maxY or 0) - (minY or 0)
xMark = max(min(self.xMark, maxX), minX)
# If in top 10% of X coordinates, align labels differently
if xMark > canvasMinX + 0.9 * (canvasMaxX - canvasMinX):
labelAlignment = 'right'
labelPrefix = ''
labelSuffix = ' '
else:
labelAlignment = 'left'
labelPrefix = ' '
labelSuffix = ''
# Draw line
self.subplot.axvline(x=xMark, linestyle='dotted', linewidth=1, color=(0, 0, 0))
# Draw its X position
if chosenX.unit is None:
xLabel = '{}{}{}'.format(labelPrefix, roundToPrec(xMark, 4), labelSuffix)
else:
xLabel = '{}{} {}{}'.format(labelPrefix, roundToPrec(xMark, 4), chosenX.unit, labelSuffix)
self.subplot.annotate(
xLabel, xy=(xMark, canvasMaxY - 0.01 * (canvasMaxY - canvasMinY)), xytext=(0, 0), annotation_clip=False,
textcoords='offset pixels', ha=labelAlignment, va='top', fontsize='small')
# Get Y values
yMarks = set()
def addYMark(val):
if val is None:
return
# Round according to shown Y range - the bigger the range,
# the rougher the rounding
if yDiff != 0:
rounded = roundToPrec(val, 4, nsValue=yDiff)
else:
rounded = val
# If due to some bug or insufficient plot density we're
# out of bounds, do not add anything
if minY <= val <= maxY or minY <= rounded <= maxY:
yMarks.add(rounded)
for source, target in iterList:
xs, ys = plotData[(source, target)]
if not xs or xMark < min(xs) or xMark > max(xs):
continue
# Fetch values from graphs when we're asked to provide accurate data
if accurateMarks:
try:
y = view.getPoint(
x=xMark,
miscInputs=miscInputs,
xSpec=chosenX,
ySpec=chosenY,
src=source,
tgt=target)
addYMark(y)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pyfalog.warning('Failed to get X mark for "{}" vs "{}"'.format(source.name, '' if target is None else target.name))
# Silently skip this mark, otherwise other marks and legend display will fail
continue
# Otherwise just do linear interpolation between two points
else:
if xMark in xs:
# We might have multiples of the same value in our sequence, pick value for the last one
idx = len(xs) - xs[::-1].index(xMark) - 1
addYMark(ys[idx])
continue
idx = bisect(xs, xMark)
yMark = self._interpolateX(x=xMark, x1=xs[idx - 1], y1=ys[idx - 1], x2=xs[idx], y2=ys[idx])
addYMark(yMark)
# Draw Y values
for yMark in yMarks:
self.subplot.annotate(
'{}{}{}'.format(labelPrefix, yMark, labelSuffix), xy=(xMark, yMark), xytext=(0, 0),
textcoords='offset pixels', ha=labelAlignment, va='center', fontsize='small')
legendLines = []
for i, iData in enumerate(legendData):
color, lineStyle, label = iData
legendLines.append(Line2D([0], [0], color=color, linestyle=lineStyle, label=label.replace('$', r'\$')))
if len(legendLines) > 0 and self.graphFrame.ctrlPanel.showLegend:
legend = self.subplot.legend(handles=legendLines)
for t in legend.get_texts():
t.set_fontsize('small')
for l in legend.get_lines():
l.set_linewidth(1)
self.canvas.draw()
self.Refresh()
def markXApproximate(self, x):
if x is not None:
self.xMark = x
self.draw(accurateMarks=False)
def unmarkX(self):
self.xMark = None
self.draw()
@staticmethod
def _getLimits(vals, minExtra=0, maxExtra=0):
minVal = min(vals, default=0)
maxVal = max(vals, default=0)
# Extend range a little for some visual space
valRange = maxVal - minVal
minVal -= valRange * minExtra
maxVal += valRange * maxExtra
# Extend by % of value if we show function of a constant
if minVal == maxVal:
minVal -= minVal * 0.05
maxVal += minVal * 0.05
# If still equal, function is 0, spread out visual space as special case
if minVal == maxVal:
minVal -= 5
maxVal += 5
return minVal, maxVal
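# Worked example (hypothetical values): _getLimits({0, 10}, minExtra=0.05, maxExtra=0.1)
# has a range of 10 and returns (-0.5, 11.0); an empty set collapses to (0, 0) and is
# then spread out to the special-case limits (-5, 5).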
@staticmethod
def _interpolateX(x, x1, y1, x2, y2):
pos = (x - x1) / (x2 - x1)
y = y1 + pos * (y2 - y1)
return y
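# e.g. _interpolateX(x=5, x1=0, y1=0, x2=10, y2=20) -> 10.0 (plain linear interpolation)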
@staticmethod
def __checkNumbers(xs, ys):
for number in itertools.chain(xs, ys):
if math.isnan(number) or math.isinf(number):
return False
return True
# Matplotlib event handlers
def OnMplCanvasClick(self, event):
if event.button == 1:
if not self.mplOnDragHandler:
self.mplOnDragHandler = self.canvas.mpl_connect('motion_notify_event', self.OnMplCanvasDrag)
if not self.mplOnReleaseHandler:
self.mplOnReleaseHandler = self.canvas.mpl_connect('button_release_event', self.OnMplCanvasRelease)
self.markXApproximate(event.xdata)
elif event.button == 3:
self.unmarkX()
def OnMplCanvasDrag(self, event):
self.markXApproximate(event.xdata)
def OnMplCanvasRelease(self, event):
if event.button == 1:
if self.mplOnDragHandler:
self.canvas.mpl_disconnect(self.mplOnDragHandler)
self.mplOnDragHandler = None
if self.mplOnReleaseHandler:
self.canvas.mpl_disconnect(self.mplOnReleaseHandler)
self.mplOnReleaseHandler = None
# Do not write markX here because of strange mouse behavior: when dragging,
# sometimes when you release button, x coordinate changes. To avoid that,
# we just re-use coordinates set on click/drag and just request to redraw
# using accurate data
self.draw(accurateMarks=True)
| gpl-3.0 |
bayesimpact/bob-emploi | data_analysis/importer/city_locations.py | 1 | 1341 | """Module to upload the French city locations to MongoDB."""
import typing
from typing import Any, Dict, List
import pandas
from bob_emploi.frontend.api import geo_pb2
from bob_emploi.data_analysis.lib import cleaned_data
from bob_emploi.data_analysis.lib import mongo
def csv2dicts(stats_filename: str, urban_context_filename: str) -> List[Dict[str, Any]]:
"""Prepare cities for upload to MongoDB.
Args:
stats_filename: path to a file containing stats about cities.
urban_context_filename: path to a file containing urban context
info for each city.
Returns:
A list of dict JSON-like object compatible with the geo_pb2.FrenchCity
proto.
"""
city_stats = pandas.read_csv(
stats_filename,
sep=',', header=None, usecols=[10, 19, 20],
names=['_id', 'longitude', 'latitude'],
dtype={'_id': str, 'latitude': float, 'longitude': float})
city_stats = city_stats.dropna()
urban_contexts = cleaned_data.french_urban_areas(filename=urban_context_filename)
city_stats['urbanContext'] = city_stats['_id'].map(urban_contexts.periurban)\
.fillna(geo_pb2.UNKNOWN_URBAN_CONTEXT).astype(int)
return typing.cast(List[Dict[str, Any]], city_stats.to_dict(orient='records'))
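# A single returned record looks roughly like this (illustrative values, not real data):
#   {'_id': '31555', 'longitude': 1.4442, 'latitude': 43.6045, 'urbanContext': 1}
# where urbanContext is stored as a plain int taken from the geo_pb2 urban-context enum.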
if __name__ == '__main__':
mongo.importer_main(csv2dicts, 'cities')
| gpl-3.0 |
scienceopen/gridaurora | gridaurora/loadtranscargrid.py | 1 | 2538 | """
Load and plot the Transcar energy grid.
Note: "Egrid" is not what other programs use externally; they consume the variable "bins" instead.
"""
from pathlib import Path
import xarray
import numpy as np
from scipy.stats import linregress
from matplotlib.pyplot import figure
flux0 = 70114000000.0
Nold = 33
Nnew = 81 # 100MeV
def loadregress(fn: Path):
# %%
Egrid = np.loadtxt(Path(fn).expanduser(), delimiter=",")
# Ematt = asarray([logspace(1.7220248253079387,4.2082263059355824,num=Nold,base=10),
# #[logspace(3.9651086925197356,9.689799159992674,num=33,base=exp(1)),
# logspace(1.8031633895706722,4.2851520785250914,num=Nold,base=10)]).T
# %% log-lin regression
Enew = np.empty((Nnew, 4))
Enew[:Nold, :] = Egrid
for k in range(4):
s, i = linregress(range(Nold), np.log10(Egrid[:, k]))[:2]
Enew[Nold:, k] = 10 ** (np.arange(Nold, Nnew) * s + i)
return Enew
def doplot(fn: Path, bins: xarray.DataArray, Egrid: np.ndarray = None, debug: bool = False):
# %% main plot
ax = figure().gca()
ax.bar(
left=bins.loc[:, "low"], height=bins.loc[:, "flux"], width=bins.loc[:, "high"] - bins.loc[:, "low"],
)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel("flux [s$^{-1}$ sr$^{-1}$ cm$^{-2}$ eV$^{-1}$]")
ax.set_xlabel("bin energy [eV]")
ax.set_title(f"Input flux used to generate eigenprofiles, based on {fn}")
# %% debug plots
if debug:
ax = figure().gca()
bins[["low", "high"]].plot(logy=True, ax=ax, marker=".")
ax.set_xlabel("bin number")
ax.set_ylabel("bin energy [eV]")
ax = figure().gca()
bins["flux"].plot(logy=True, ax=ax, marker=".")
ax.set_xlabel("bin number")
ax.set_ylabel("flux [s$^{-1}$ sr$^{-1}$ cm$^{-2}$ eV$^{-1}$]")
if Egrid is not None:
ax = figure().gca()
ax.plot(Egrid, marker=".")
# ax.plot(Ematt,marker='.',color='k')
ax.set_yscale("log")
ax.set_ylabel("eV")
ax.legend(["E1", "E2", "pr1", "pr2"], loc="best")
def makebin(Egrid: np.ndarray):
E1 = Egrid[:, 0]
E2 = Egrid[:, 1]
pr1 = Egrid[:, 2]
pr2 = Egrid[:, 3]
dE = E2 - E1
Esum = E2 + E1
flux = flux0 / 0.5 / Esum / dE
Elow = E1 - 0.5 * (E1 - pr1)
Ehigh = E2 - 0.5 * (E2 - pr2)
E = np.column_stack((Elow, Ehigh, flux))
Ed = xarray.DataArray(data=E, dims=["energy", "type"])
Ed["type"] = ["low", "high", "flux"]
return Ed
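# Sketch of how the pieces fit together (the file path is a placeholder):
#   Egrid = loadregress('transcar_grid.csv')   # (81, 4) array: columns E1, E2, pr1, pr2
#   bins = makebin(Egrid)                      # DataArray with 'type' = low/high/flux
#   doplot('transcar_grid.csv', bins, Egrid)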
| gpl-3.0 |