repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jzt5132/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
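# Illustrative sketch (not part of the original test suite): as the checks above verify,
# l1_min_c gives the threshold C at or below which an L1-penalized model has all-zero
# coefficients, so fitting just above it should yield at least one non-zero coefficient.
# Uses only objects already imported/defined in this module.
def _example_l1_min_c_usage():
    min_c = l1_min_c(dense_X, Y1, loss='log')
    clf = LogisticRegression(penalty='l1', C=min_c * 1.01).fit(dense_X, Y1)
    return min_c, clf.coef_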
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
astocko/statsmodels | statsmodels/examples/ex_kde_confint.py | 34 | 1973 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 11:02:59 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.nonparametric.api as npar
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
# example from test_kde.py mixture of two normal distributions
np.random.seed(12345)
x = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1, scale=.5),dict(loc=1, scale=.5)))
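# 200 draws from a two-component normal mixture: 25% from N(loc=-1, scale=0.5)
# and 75% from N(loc=1, scale=0.5)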
x.sort() # not needed
kde = npar.KDEUnivariate(x)
kde.fit('gau')
ci = kde.kernel.density_confint(kde.density, len(x))
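# pointwise confidence band for the estimated density, based on the fitted kernel and the sample size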
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(x, bins=15, normed=True, alpha=0.25)
ax.plot(kde.support, kde.density, lw=2, color='red')
ax.fill_between(kde.support, ci[:,0], ci[:,1],
color='grey', alpha=0.7)
ax.set_title('Kernel Density Gaussian (bw = %4.2f)' % kde.bw)
# use all kernels directly
x_grid = np.linspace(np.min(x), np.max(x), 51)
x_grid = np.linspace(-3, 3, 51)
kernel_names = ['Biweight', 'Cosine', 'Epanechnikov', 'Gaussian',
'Triangular', 'Triweight', #'Uniform',
]
fig = plt.figure()
for ii, kn in enumerate(kernel_names):
ax = fig.add_subplot(2, 3, ii+1) # without uniform
ax.hist(x, bins=10, normed=True, alpha=0.25)
# reduce bandwidth for Gaussian and Uniform, which are too large in this example
if kn in ['Gaussian', 'Uniform']:
args = (0.5,)
else:
args = ()
kernel = getattr(kernels, kn)(*args)
kde_grid = [kernel.density(x, xi) for xi in x_grid]
confint_grid = kernel.density_confint(kde_grid, len(x))
ax.plot(x_grid, kde_grid, lw=2, color='red', label=kn)
ax.fill_between(x_grid, confint_grid[:,0], confint_grid[:,1],
color='grey', alpha=0.7)
ax.legend(loc='upper left')
plt.show()
| bsd-3-clause |
sanketloke/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
tagomatech/ETL | cboe_options/cboe_options.py | 1 | 12254 | '''
TODO: Code below isn't working any longer. Need to adapt it to slight changes in the CBOE website
Copyright (C) 2015, Edouard 'tagoma' Tallent
Class fetching options data from www.nasdaq.com
Nasdaq_option_quotes.py v0.2 (Nov15)
QuantCorner @ https://quantcorner.wordpress.com
'''
from bs4 import BeautifulSoup
import requests
import re
import numpy as np
import pandas as pd
class NasdaqOptions(object):
'''
Class NasdaqOptions fetches options data from Nasdaq website
User inputs:
Ticker: ticker
- Ticker for the underlying
Expiry: nearby
- 1st Nearby: 1
- 2nd Nearby: 2
- etc ...
Moneyness: money
- All moneyness: all
- In-the-money: in
- Out-of-the-money: out
- Near the money: near
Market: market
- Composite quote: Composite
- Chicago Board Options Exchange: CBO
- American Options Exchange: AOE
- New York Options Exchange: NYO
- Philadelphia Options Exchange: PHO
- Montreal Options Exchange: MOE
- Boston Options Exchange: BOX
- International Securities Exchange: ISE
- Bats Exchange Options Market: BTO
- NASDAQ Options: NSO
- C2(Chicago) Options Exchange: C2O
- NASDAQ OMX BX Options Exchange: BXO
- MIAX: MIAX
Option category: expi
- Weekly options: week
- Monthly options: stand
- Quarterly options: quart
- CEBO options (Credit Event Binary Options): cebo
'''
def __init__(self, ticker, nearby, money='near', market='cbo', expi='stan'):
self.ticker = ticker
self.nearby = nearby-1 # '0' refers to the 1st nearby on the NASDAQ website
#self.type = type # Deprecated
self.market = market
self.expi = expi
if money == 'near':
self.money = ''
else:
self.money = '&money=' + money
def get_options_table(self):
'''
- Loop over as many webpages as required to get the complete option table for the
option desired
- Return a pandas.DataFrame() object
'''
# Create an empty pandas.Dataframe object. New data will be appended to
old_df = pd.DataFrame()
# Variables
loop = 0 # Loop over webpages starts at 0
page_nb = 1 # Get the top of the options table
flag = 1 # Set a flag that will be used to call get_pager()
old_rows_nb = 0 # Number of rows so far in the table
# Loop over webpages
while loop < int(page_nb):
# Construct the URL
'''url = 'http://www.nasdaq.com/symbol/' + self.ticker + '/option-chain?dateindex='\
+ str(self.nearby) + '&callput=' + self.type + '&money=all&expi='\
+ self.expi + '&excode=' + self.market + '&page=' + str(loop+1)'''
url = 'http://www.nasdaq.com/symbol/' + self.ticker + '/option-chain?excode=' + self.market + self.money + '&expir=' + self.expi + '&dateindex=' + str(self.nearby) + '&page=' + str(loop+1)
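# Illustrative example (values depend on the constructor arguments) of the URL built above:
# http://www.nasdaq.com/symbol/SPX/option-chain?excode=cbo&expir=stan&dateindex=1&page=1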
# Query NASDAQ website
try:
response = requests.get(url)#, timeout=0.1)
# DNS lookup failure
except requests.exceptions.ConnectionError as e:
print('''Webpage doesn't seem to exist!\n%s''' % e)
pass
# Timeout failure
except requests.exceptions.ConnectTimeout as e:
print('''Slow connection!\n%s''' % e)
pass
# HTTP error
except requests.exceptions.HTTPError as e:
print('''HTTP error!\n%s''' % e)
pass
# Get webpage content
soup = BeautifulSoup(response.content, 'html.parser')
# Determine actual number of pages to loop over
if flag == 1: # It is run only once
# Get the number of page the option table lies on
last_page_raw = soup.find('a', {'id': 'quotes_content_left_lb_LastPage'})
last_page = re.findall(pattern='(?:page=)(\d+)', string=str(last_page_raw))
page_nb = ''.join(last_page)
flag = 0
# Extract table containing the option data from the webpage
table = soup.find_all('table')[4] # table #4 in the webpage is the one of interest
# Extract option data from table as a list
elems = table.find_all('td') # Python object
lst = [elem.text for elem in elems] # Option data as a readable list
# Rearrange data and create a pandas.DataFrame
arr = np.array(lst)
reshaped = arr.reshape((len(lst)/16, 16))
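# each option row scraped from the table has 16 cells: the call quote fields, the strike,
# and the put quote fields (split apart by the slicing below)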
new_df = pd.DataFrame(reshaped)
frames = [old_df, new_df]
old_df = pd.concat(frames)
rows_nb = old_df.shape[0]
# Increment loop counter
if rows_nb > old_rows_nb:
loop+=1
old_rows_nb = rows_nb
elif rows_nb == old_rows_nb:
print('Problem while catching data.\n## You must try again. ##')
pass
else: # Case where rows have been deleted
# which shall never occur
print('Failure!\n## You must try again. ##')
pass
# Name the column 'Strike'
old_df.rename(columns={old_df.columns[8]:'Strike'}, inplace=True)
## Split into 2 dataframes (1 for calls and 1 for puts)
calls = old_df.ix[:,1:7]
puts = old_df.ix[:,10:16] # Slicing is not inclusive of the last column
# Set 'Strike' column as dataframe index
calls = calls.set_index(old_df['Strike'])
puts = puts.set_index(old_df['Strike'])
## Headers names
headers = ['Last', 'Chg', 'Bid', 'Ask', 'Vol', 'OI']
calls.columns = headers
puts.columns = headers
return calls, puts
if __name__ == '__main__':
# Get data for Dec-15 SPX options, Dec-15 being the 2nd nearby
options = NasdaqOptions('SPX',2)
calls, puts = options.get_options_table()
# Write on the screen
print('\n######\nCalls:\n######\n', calls,\
'\n\n######\nPuts:\n######\n', puts)
'''
######
Calls:
######
Last Chg Bid Ask Vol OI
Strike
1900 179.40 0 38292
1905 103.75 0 9693
1910 191.30 0 8378
1915 186.45 -1.97 0 3671
1920 173.95 0 7218
1925 181.51 6.38 1 23678
1930 170.90 5.30 10 15743
1935 160.37 0 17814
1940 152.30 0 10564
1945 88.82 0 10687
1950 149.73 0 66844
1955 124.81 0 11206
1960 120.60 0 10682
1965 123.60 0 10737
1970 143.00 5.50 789 15184
1975 136.48 3.70 1 25654
1980 129.15 0.01 41 12569
1985 127.90 12.45 3 15602
1990 98.86 0 7900
1995 98.25 0 10133
2000 114.40 1.30 31 116981
2005 103.20 3.90 3 15097
2010 98.45 -4.07 2 8119
2015 94.33 12.83 2 7058
2020 86.40 0 17249
2025 90.50 -1.35 20 59959
2030 89.65 3.35 99 3303
2035 80.45 1.55 1 2979
2040 77.10 -1.55 13 5517
2045 74.25 0.15 15 3768
... ... ... .. .. ... ...
2115 28.75 1.45 136 6475
2120 26.12 1.42 64 5201
2125 23.90 1.40 154 28130
2130 21.27 1.67 10077 1647
2135 19.65 2.25 82 13484
2140 17.50 1.54 218 5150
2145 14.80 1.10 85 3644
2150 13.01 1.16 2073 55095
2155 11.23 0.98 71 1543
2160 9.90 1.05 70 10684
2165 8.60 1.05 14 1124
2170 7.23 0.52 28 2911
2175 6.30 0.77 166 22039
2180 5.30 0.70 74 5608
2185 4.55 0.65 8 524
2190 3.80 0.50 47 2154
2195 3.10 0.30 25 3563
2200 3.00 0.70 1815 63117
2205 2.15 0.13 32 429
2210 2.05 0.40 33 12771
2215 1.35 0 129
2220 1.50 0.40 5 2807
2225 1.10 0.09 61 18367
2230 0.90 0.05 25 187
2235 1.10 0.41 7 81
2240 0.70 0 444
2245 0.60 0 1060
2250 0.70 0.10 4058 42602
2275 0.35 0 37307
2300 0.30 0.05 9004 91173
[73 rows x 6 columns]
######
Puts:
######
Last Chg Bid Ask Vol OI
Strike
1900 5.80 -0.16 3135 115697
1905 5.95 -0.90 1 9772
1910 5.70 -0.75 1 8667
1915 6.90 0.41 13 4304
1920 6.70 -0.20 1146 9707
1925 7.02 -1.18 271 50314
1930 7.40 0.10 61 21183
1935 7.85 0 16832
1940 8.25 -0.05 19 12021
1945 8.35 -0.05 4 20285
1950 9.05 0.17 5308 115872
1955 9.28 0.06 8 11626
1960 9.55 -0.08 5051 16218
1965 10.13 0.11 9 11052
1970 10.50 -0.10 115 16865
1975 10.80 -0.33 218 35755
1980 11.35 -0.35 13 15200
1985 12.05 -1.81 3 16854
1990 12.80 0.01 67 8195
1995 13.59 0.19 14 10430
2000 14.00 -1.00 5473 142800
2005 14.13 -0.63 20 16162
2010 14.96 -0.76 4 11485
2015 16.46 -0.32 1 7123
2020 16.55 -0.75 144 21422
2025 17.84 -0.06 88 47092
2030 18.17 -0.88 166 7872
2035 21.55 16 2749
2040 20.02 -0.62 106 5465
2045 21.33 -0.32 20 4603
... ... ... .. .. ... ...
2115 51.10 0 10
2120 47.10 -0.40 6 147
2125 48.40 -1.15 19 5128
2130 50.00 -14.01 132 37
2135 54.50 0 1
2140 57.84 0 44
2145 64.35 -0.60 10 15
2150 63.16 0.51 5 9430
2155 0
2160 68.02 -142.03 40 1
2165 0
2170 79.50 -14.30 4 2
2175 98.90 0 153
2180 0
2185 0
2190 0
2195 112.28 0 8
2200 101.90 -5.30 1 5498
2205 0
2210 0
2215 208.50 0 5
2220 245.20 0 1
2225 247.66 0 84
2230 209.50 0 42
2235 0
2240 253.95 0 4
2245 0
2250 182.85 0 354
2275 205.80 0 796
2300 230.70 0 2562
[73 rows x 6 columns]
'''
| mit |
EFerriss/HydrogenCpx | HydrogenCpx/Fig13_xenolithEquilibration_data_1mm.py | 1 | 2855 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 03 15:14:02 2015
@author: Ferriss
Figure roughly following Padron-Navarta et al. 2014 Figure 8
Comparing olivine and cpx rim formation over time
Generate data and save to xenolithRims.txt in json format
"""
import pynams.diffusion as diff
import numpy as np
import matplotlib.pyplot as plt
import json
################### User input variables #####################################
lengths_microns = [1000.] * 3
logD_list = [-12.5, -12., -11.5, -11., -10.5, -10.]
logD_list = [-12.]
#time_minutes = [0.25, 0.5, 0.75] + list(np.arange(1, 11, 0.5)) + range(15, 65, 1) + range(65, 5*65, 5)
time_minutes = np.linspace(0.00001, 12., 100) * 60.
direction = 0 # 0=[100]*, 1=[010], 2=[001]
points_in_calc = 50
################## Set up and run calculation ################################
v_sat = np.sum(np.ones([points_in_calc, points_in_calc, points_in_calc]))
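# v_sat is the fully 'saturated' volume of the numerical grid (points_in_calc**3 unit cells),
# used below to express the remaining water as a percentage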
data = [time_minutes]
#data = []
for D_m2s in logD_list:
D3 = [D_m2s]*3
percent_water_remaining = np.zeros_like(time_minutes)
rim_location_microns = np.zeros_like(time_minutes)
idx = 0
for minutes in time_minutes:
time_seconds = minutes * 60.
v, x, y = diff.diffusion3Dnpi(lengths_microns, D3, time_seconds,
plot3=False, points=points_in_calc)
percent_water_remaining[idx] = 100. * np.sum(v) / v_sat
print ''.join(('logD=', '{:.1f}'.format(D_m2s), ', ',
'{:.1f}'.format(minutes), ' minutes done'))
idx = idx + 1
data.append(list(percent_water_remaining))
print data
data.append(logD_list)
# Save data to file
workfile = 'xenolithEquilibration-1mm.txt'
with open(workfile, 'w') as diff_file:
diff_file.write(json.dumps(data))
time_hours = time_minutes / 60.
#%% Plotting
fig = plt.figure()
fig.set_size_inches(6, 5)
ax = fig.add_subplot(111)
plt.style.use('paper')
colors = ['green', 'b', 'purple', 'red', 'black']
idx_D = 0
for percentRemaining in data[1:]:
ax.plot(time_hours, percentRemaining, '-', mew=1, linewidth=2,
color=colors[idx_D],
label=''.join(('logD=', '{:.1f}'.format(logD_list[idx_D]))))
idx_D = idx_D + 1
volume_mm3 = (lengths_microns[0] * lengths_microns[1] * lengths_microns[2]) / 1E9
tit = ''.join(('{:.1f}'.format(volume_mm3), ' mm$^3$ cube'))
ylab = 'Preservation of hydrogen (%)'
ax.set_ylabel(ylab)
ax.set_xlabel('Time (hours)')
ax.set_title(tit)
ax.set_ylim(0., 100.)
ax.set_xlim(0, max(time_hours))
### time ranges for comparison
#ascent_rate_m_per_s = [0.1, 0.2, 0.5, 1., 2., 5., 10.]
#depth_km = np.array([2., 3.])
#ytloc = [300] * len(ascent_rate_m_per_s)
#
#depth_m = depth_km * 1E3
#idx = 0
#for rate in ascent_rate_m_per_s:
# ascent_time_s = depth_m / rate
# ascent_time_m = ascent_time_s / 60.
plt.show(fig)
print 'Finished'
| mit |
cdw/muscle_saxs | muscle_saxs/xray_background.py | 1 | 7800 | #!/usr/bin/env python
# encoding: utf-8
"""
xray_background.py - remove background from small angle x-ray scattering imgs
Created by Dave Williams on 2014-10-09
"""
# System imports
import numpy as np
from scipy import optimize
import cv2
import matplotlib.pyplot as plt
# Local module imports
import support
import fake_img
## Find the background profile and fit it
def background_collapse(center, img, peaks, plot=False):
"""Collapse the image background, ignoring the peak regions.
Good ideas to be had here: http://goo.gl/2xEApw
Args:
center: x,y center of blocked image
img: from which background is extracted
peaks: row,col locations of peaks; don't want these in the background
plot: if we should plot the exclusion regions and profile (True/False)
or a list of two axes to plot onto
Gives:
background: profile of background
background_dists: pixel distances of background from center
"""
#import ipdb; ipdb.set_trace()
## Find peak angles
cx, cy = center
#m_thetas = [support.pt_to_pt_angle((cy, cx), pt) for pt in peaks]
m_thetas = [np.arctan2(pt[0] - cy, pt[1] - cx) for pt in peaks]
## With shifting, find the masking region
mask = np.ones((img.shape[0], img.shape[1]*2), dtype=np.float)
m_center = (int(round(center[0] + img.shape[1])), int(round(center[1])))
m_thetas = np.round(np.degrees(m_thetas)).astype(np.int)
theta_pm = 12 # amount to block on either side
m_angles = [(t-theta_pm, t+theta_pm) for t in m_thetas] # angles to block
m_axes = (img.shape[1], img.shape[1]) # should always fill screen
for angle in m_angles:
cv2.ellipse(mask, m_center, m_axes, 180, angle[0], angle[1], 0, -1)
mask = mask[:,img.shape[1]:]
# Construct a radial distance img
row, col = np.indices(img.shape)
r = np.sqrt((col-center[0])**2 + (row-center[1])**2)
# Coerce into ints for bincount
r = r.astype(np.int)
img = img.astype(np.int)
img = img*mask
# Do the counting
flat_count = np.bincount(r.ravel(), img.ravel())
rad_occurances = np.bincount(r.ravel())
radial_profile = flat_count/rad_occurances
# Kill the blocked region
highest_ind = radial_profile.argmax()
background = radial_profile[highest_ind:]
background_dists = np.arange(highest_ind,len(radial_profile))
# Plot if passed
if plot is not False:
if plot is True:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=[6,6])
else:
ax1, ax2 = plot
ax1.scatter(center[0], center[1], color='m')
ax1.imshow(mask*img)
colors = list(np.tile(
['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w', '.25', '.5', '.75'], 5))
for peak in peaks:
c = colors.pop(0)
ax1.scatter(peak[1], peak[0], c=c, s=40)
ax2.plot(background, linewidth=3)
ax1.set_title("Masked image for profiling")
ax2.set_title("Resulting radial profile")
plt.draw()
plt.tight_layout()
plt.show()
return background_dists, background
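# Minimal illustrative sketch (not part of the original module) of the radial-averaging
# trick used in background_collapse above: np.bincount over integer radii sums the
# intensity landing at each radius, and dividing by the number of pixels at that radius
# gives the azimuthally averaged profile.
def _radial_profile_sketch(img, center):
    row, col = np.indices(img.shape)
    r = np.sqrt((col - center[0])**2 + (row - center[1])**2).astype(np.int)
    summed = np.bincount(r.ravel(), img.astype(np.float).ravel())
    counts = np.bincount(r.ravel())
    return summed / counts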
def _fit_double_exp(trace_y, trace_x, plot=False):
"""Fit a double exponential function to the passed trace.
Ignore the region to the left of the peak.
Takes:
trace_y: a nx1 data trace
trace_x: the x indices that go with trace_y
plot: whether or not to plot the fit
Gives:
vals: optimized parameters for a double exp
"""
# A residual function to test how good our fits are
dexp = support.double_exponential_1d
diff = lambda i, j: np.sum(np.abs(np.subtract(i,j)))
resi = lambda g: diff(dexp(trace_x, g[0], g[1], g[2], g[3], g[4]), trace_y)
# Guess some values then optimize
guess = [1.0, 1000.0, 0.01, 5000.0, 0.1]
opt_res = optimize.minimize(resi, guess, jac=False, bounds = ( (0, np.inf), (0, np.inf), (0, 1), (0, np.inf), (0, 1)))
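# Bounds keep the amplitudes (1st, 2nd and 4th parameters) non-negative and restrict the
# 3rd and 5th parameters to [0, 1]; the meaning of the five parameters is set by
# support.double_exponential_1d.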
success = opt_res['success']
vals = opt_res['x']
# Plot if desired
if plot is not False:
if plot is True:
fig, ax = plt.subplots(figsize=[6,3])
else:
ax = plot
plt.plot(trace_x, dexp(trace_x, *zip(vals)), 'c', linewidth=3)
plt.plot(trace_x, trace_y, 'r', linewidth=3)
ax.set_title("Real (r) and fitted (c) values")
plt.draw()
plt.tight_layout()
plt.show()
return vals
## Generate a fake background and subtract it from a passed image
def _fake_background(size, mask_center, mask_rad, diff_center, back_vals):
"""Generate a fake background image from the passed (fitted) values.
Args:
size (tuple): (row, col) size of image to generate
mask_center: the center of the masked region
mask_rad: the radius of the masked region
diff_center: the center of the diffraction (and background) pattern
back_vals (iter): the (a,b,c,d,e) values of the double exponential
Returns:
img: the fake background image
"""
# Flips and unpacks
a, b, c, d, e = back_vals
mask_center = (mask_center[1], mask_center[0])
diff_center = (diff_center[1], diff_center[0])
exp_img = fake_img.background(size, diff_center, a, b, c, d, e)
mask_img = fake_img.masking(size, mask_center, mask_rad)
return exp_img*mask_img
def find_and_remove_background(mask_cen, mask_rad, diff_cen, img, peaks,
plot=False):
"""Fit/subtract the background of an image and the peaks of its angles.
Args:
mask_cen: the center of the masking region
mask_rad: the radius of the masking region
diff_cen: the center of the diffraction pattern
img: the image whose background we're interested in
peaks: the peaks we want to exclude (at least one)
plot: to plot the masks and fit or not (True/False) or a list of
three axes to plot onto
Returns:
img: img-background, to best of abilities
"""
# Plot set up
if plot is not False:
if plot is True:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=[6,9])
ax12 = (ax1, ax2)
else:
ax12 = plot[0], plot[1]
ax3 = plot[2]
else:
ax12, ax3 = False, False
size = img.shape
back_x, back_y = background_collapse(diff_cen, img, peaks, plot=ax12)
fits = _fit_double_exp(back_y, back_x, plot=ax3)
fake_back_img = _fake_background(size, mask_cen, mask_rad, diff_cen, fits)
return img-fake_back_img
## Use peaks to find info about image
def find_diffraction_center(pairs, which_list='longest'):
""" Find the diffraction center based off of pairs of points.
By default, use the longest list of pairs.
Takes:
pairs: lists of point pairs, output of extract_pairs
which_list: "longest" or index location in pairs
Gives:
center: row,col center of the diffraction image
"""
# Which pair list to use
if which_list == 'longest':
which_list = np.argmax(map(len, pairs))
# Find mean middle point
mid = lambda pair: np.add(np.subtract(pair[0], pair[1])/2.0, pair[1])
center = np.mean([mid(p) for p in pairs[which_list]], 0)
return center
## Test if run directly
def main():
import support
import peak_finder
SAMPLEFILE = 'sampleimg1.tif'
data = support.image_as_numpy(SAMPLEFILE) # load
block = find_blocked_region(data, True) # find blocker
unorg_peaks = peak_finder.peaks_from_image(data, block, plot=True)
success, thetas, peaks = peak_finder.optimize_thetas(block[0], unorg_peaks)
back_dists, back = background_collapse(block[0], data, thetas, True)
back_params = _fit_double_exp(back, back_dists, True)
if __name__ == '__main__':
main()
| mit |
bluegod/OSCAAR | oscaar/dataBank.py | 1 | 48054 | '''oscaar v2.0
Module for differential photometry
Developed by Brett Morris, 2011-2013 & minor modifications by Luuk Visser
'''
import numpy as np
import pyfits
from matplotlib import pyplot as plt
from scipy import optimize
from glob import glob
import os
import re
import oscaar
import mathMethods
import sys
import systematics
oscaarpath = os.path.dirname(os.path.abspath(oscaar.__file__))
oscaarpathplus = os.path.join(oscaarpath,'extras')
class dataBank:
'''
Methods for easily storing and accessing information from the entire
differential photometry process with OSCAAR.
Core Developer: Brett Morris
'''
def __init__(self, initParFilePath=None):
"""
Get the initial guesses for the centroids of the stars from the DS9 regions file,
create dictionaries in which to store all of the data collected for each star, and for each
aperture radius. Allocate the memory for these arrays wherever possible. Parse the init.par
file to grab the paths and initial parameters for the run.
Parameters
----------
initParFilePath : str
Optional full path to the init.par file to use for the data
"""
self.dict = {}
self.parseInit(initParFilePath)
self.flatPath = self.dict["flatPath"]
self.rawRegionsList = self.dict["regPaths"]
self.ingress = self.dict["ingress"]
self.egress = self.dict["egress"]
self.apertureRadii = self.dict["apertureRadius"]
self.trackingZoom = self.dict["trackingZoom"]
self.ccdGain = self.dict["ccdGain"]
self.trackPlots = self.dict["trackPlots"]
self.photPlots = self.dict["photPlots"]
self.smoothConst = self.dict ["smoothConst"]
self.darksPath = self.dict["darksPath"]
self.imagesPaths = self.dict["imagesPaths"]
self.timeKeyword = self.dict["timeKeyword"]
if self.timeKeyword == 'JD':
# Since we're trying to convert to JD, use a dummy lambda function
self.convertToJD = lambda x: x
elif self.timeKeyword == 'DATE-OBS':
self.convertToJD = mathMethods.ut2jdSplitAtT
#if not hasattr(sys, 'real_prefix'):
# assert len(self.imagesPaths) > 1, 'Must have at least two data images'
if not hasattr(sys, 'real_prefix'):
self.masterFlat = np.ones_like(pyfits.getdata(self.imagesPaths[0]))
elif self.flatPath != '':
self.masterFlat = pyfits.getdata(self.flatPath)
self.masterFlatPath = self.flatPath
elif self.flatPath == '':
self.masterFlat = np.ones_like(pyfits.getdata(self.imagesPaths[0]))
self.allStarsDict = {}
self.regionsFileList, self.regionsFITSrefsList = self.parseRawRegionsList(self.rawRegionsList)
init_x_list,init_y_list = self.parseRegionsFile(self.regionsFileList[0])
zeroArray = np.zeros_like(self.imagesPaths,dtype=np.float32)
self.times = np.zeros_like(self.imagesPaths,dtype=np.float64)
self.keys = []
self.targetKey = '000'
Nradii = len(self.apertureRadii)
for i in range(0,len(init_x_list)):
self.allStarsDict[str(i).zfill(3)] = {'x-pos':np.copy(zeroArray), 'y-pos':np.copy(zeroArray),\
'rawFlux':[np.copy(zeroArray) for j in range(Nradii)], 'rawError':[np.copy(zeroArray) for j in range(Nradii)],'flag':False,\
'scaledFlux':[np.copy(zeroArray) for j in range(Nradii)], 'scaledError':[np.copy(zeroArray) for j in range(Nradii)], 'chisq':np.zeros_like(self.apertureRadii)}
self.allStarsDict[str(i).zfill(3)]['x-pos'][0] = init_x_list[i]
self.allStarsDict[str(i).zfill(3)]['y-pos'][0] = init_y_list[i]
self.keys.append(str(i).zfill(3))
def getDict(self):
'''Return the dictionary of all star data, ``allStarsDict``.'''
return self.allStarsDict
def getMeanDarkFrame(self):
if type(self.darksPath) == str and self.darksPath == "":
return np.zeros_like(pyfits.getdata(self.imagesPaths[0]))
else:
# Else it will be a list of strings
return systematics.meanDarkFrame(self.darksPath)
def centroidInitialGuess(self,expNumber,star):
'''
Gets called for each exposure. If called on the first exposure, it will return
the intial centroid guesses input by the DS9 regions file. If any other image
and only one regions file has been submitted, it will return the previous centroid
as the initial guess for subsequent exposures. If multiple regions files have been
submitted, it will return the initial guesses in those regions files when the image path
with index ``expNumber`` is equivalent to the path stored for that regions file's
"Reference FITS image".
Parameters
----------
expNumber : int
The index of the exposure currently being analyzed. The image gets called
by its index from the list of image paths returned by getPaths().
star : str
The key from ``allStarsDict`` that corresponds to the star for which you'd
like a centroid initial guess.
Returns
-------
est_x : float
Estimated centroid position of the star ``star`` along the *x*-axis of pixels for
exposure index ``expNumber``
est_y : float
Estimated centroid position of the star ``star`` along the *y*-axis of pixels for
exposure index ``expNumber``
'''
if expNumber == 0:
est_x = self.allStarsDict[star]['x-pos'][0] ## Use DS9 regions file's estimate for the
est_y = self.allStarsDict[star]['y-pos'][0] ## stellar centroid for the first exposure
elif self.imagesPaths[expNumber] in self.regionsFITSrefsList:
refIndex = self.regionsFITSrefsList.index(self.imagesPaths[expNumber])
init_x_list, init_y_list = self.parseRegionsFile(self.regionsFileList[refIndex])
est_x = init_x_list[int(star)]
est_y = init_y_list[int(star)]
else:
est_x = self.allStarsDict[star]['x-pos'][expNumber-1] ## All other exposures use the
est_y = self.allStarsDict[star]['y-pos'][expNumber-1] ## previous exposure centroid as estimate
return est_x, est_y
def storeCentroid(self,star,exposureNumber,xCentroid,yCentroid):
'''
Store the centroid data collected by `trackSmooth`
Parameters
----------
star : string
Key for the star for which the centroid has been measured
exposureNumber : int
Index of exposure being considered
xCentroid : float
*x*-centroid of the star
yCentroid : float
*y*-centroid of the star
'''
self.allStarsDict[star]['x-pos'][exposureNumber] = xCentroid
self.allStarsDict[star]['y-pos'][exposureNumber] = yCentroid
def storeFlux(self,star,exposureNumber,rawFlux,rawError):
'''
Store the flux and error data collected by `phot`
Parameters
----------
star : string
Key for the star from the ``allStarsDict`` dictionary
exposureNumber : int
Index of exposure being considered
rawFlux : float
flux measured, to be stored
rawError : float
flux uncertainty measured, to be stored
'''
self.allStarsDict[star]['rawFlux'][exposureNumber] = rawFlux
self.allStarsDict[star]['rawError'][exposureNumber] = rawError
def storeFluxes(self,star,exposureNumber,rawFluxes,rawErrors):
'''
Store the flux and error data collected by oscaar.phot()
Parameters
----------
star : str
Key for the star from the `allStarsDict` dictionary
exposureNumber : int
Index of exposure being considered
rawFluxes : list of floats
flux measured, to be stored
rawErrors : list of floats
photon noise measured, to be stored
'''
for apertureRadiusIndex in range(len(self.apertureRadii)):
self.allStarsDict[star]['rawFlux'][apertureRadiusIndex][exposureNumber] = rawFluxes[apertureRadiusIndex]
self.allStarsDict[star]['rawError'][apertureRadiusIndex][exposureNumber] = rawErrors[apertureRadiusIndex]
def getPaths(self):
'''Return the paths to the raw images to be used'''
return self.imagesPaths
def getFluxes(self,star):
'''
Return list of fluxes for the star with key ``star``
Parameters
----------
star : str
Key for the star from the ``allStarsDict`` dictionary
Returns
-------
fluxes : list
List of fluxes for each aperture radius
'''
return self.allStarsDict[star]['rawFlux']
def getErrors(self,star):
'''Return the errors for one star, where the star parameter is the key for the
star of interest.'''
return self.allStarsDict[star]['rawError']
def storeTime(self,expNumber):
'''
Store the time in JD from the FITS header.
Parameters
----------
exposureNumber : string
Index of exposure being considered
'''
#try:
timeStamp = pyfits.getheader(self.getPaths()[expNumber])[self.timeKeyword]
#except KeyError:
# print 'Input Error: The Exposure Time Keyword indicated in observatory.par is not a valid key: ',self.timeKeyword
#finally:
self.times[expNumber] = self.convertToJD(timeStamp)
def getTimes(self):
'''Return all times collected with dataBank.storeTime()'''
return self.times
def getFlag(self,star):
'''Return the flag for the star with key `star` '''
return self.allStarsDict[star]['flag']
def getAllFlags(self):
'''Return flags for all stars'''
flags = []
for star in self.allStarsDict:
flags.append(self.allStarsDict[star]['flag'])
self.flags = flags
return flags
def setFlag(self,star,setting):
'''Set flag for star with key <star> to <setting> where
setting is a Boolean'''
self.allStarsDict[star]['flag'] = setting
def getKeys(self):
'''Return the keys for all of the stars'''
return self.keys
def scaleFluxes(self):
'''
When all fluxes have been collected, run this to re-scale the fluxes of each
comparison star to the flux of the target star. Do the same transformation on the errors.
'''
for star in self.allStarsDict:
if star != self.targetKey:
self.allStarsDict[star]['scaledFlux'], m = mathMethods.regressionScale(self.getFluxes(star),self.getFluxes(self.targetKey),self.getTimes(),self.ingress,self.egress,returncoeffs=True)
print m
self.allStarsDict[star]['scaledError'] = np.abs(m)*self.getErrors(star)
if star == self.targetKey: ## (Keep the target star the same)
self.allStarsDict[star]['scaledFlux'] = self.allStarsDict[star]['rawFlux']
self.allStarsDict[star]['scaledError'] = self.allStarsDict[star]['rawError']
def getFluxes_multirad(self,star,apertureRadiusIndex):
'''Return the fluxes for one star, where the star parameter is the key for the
star of interest.'''
return self.allStarsDict[star]['rawFlux'][apertureRadiusIndex]
def getErrors_multirad(self,star,apertureRadiusIndex):
'''Return the errors for one star, where the star parameter is the key for the
star of interest.'''
return self.allStarsDict[star]['rawError'][apertureRadiusIndex]
def scaleFluxes_multirad(self):
'''
When all fluxes have been collected, run this to re-scale the fluxes of each
comparison star to the flux of the target star. Do the same transformation on the errors.
'''
for star in self.allStarsDict:
for apertureRadiusIndex in range(len(self.apertureRadii)):
if star != self.targetKey:
print self.getFluxes_multirad(star,apertureRadiusIndex)[0]
self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex], m = mathMethods.regressionScale(self.getFluxes_multirad(star,apertureRadiusIndex),self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getTimes(),self.ingress,self.egress,returncoeffs=True)
#print m
self.allStarsDict[star]['scaledError'][apertureRadiusIndex] = np.abs(m)*self.getErrors_multirad(star,apertureRadiusIndex)
if star == self.targetKey: ## (Keep the target star the same)
self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex] = self.allStarsDict[star]['rawFlux'][apertureRadiusIndex]
self.allStarsDict[star]['scaledError'][apertureRadiusIndex] = self.allStarsDict[star]['rawError'][apertureRadiusIndex]
def getScaledFluxes(self,star):
'''Return the scaled fluxes for one star, where the star parameter is the
key for the star of interest.'''
return np.array(self.allStarsDict[star]['scaledFlux'])
def getScaledErrors(self,star):
'''Return the scaled fluxes for one star, where the star parameter is the
key for the star of interest.'''
return np.array(self.allStarsDict[star]['scaledError'])
def getScaledFluxes_multirad(self,star,apertureRadiusIndex):
'''Return the scaled fluxes for star and one aperture, where the star parameter is the
key for the star of interest.'''
return np.array(self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex])
def getScaledErrors_multirad(self,star,apertureRadiusIndex):
'''Return the scaled errors for star and one aperture, where the star parameter is the
key for the star of interest.'''
return np.array(self.allStarsDict[star]['scaledError'][apertureRadiusIndex])
def calcChiSq(self):
"""
Calculate the :math:`$\chi^2$` for the fluxes of each comparison star and the fluxes of the target star. This
metric can be used to suggest which comparison stars have similar overall trends to the target star.
"""
for star in self.allStarsDict:
self.allStarsDict[star]['chisq'] = mathMethods.chiSquared(self.getFluxes(self.targetKey),self.getFluxes(star))
chisq = []
for star in self.allStarsDict:
chisq.append(self.allStarsDict[star]['chisq'])
self.chisq = np.array(chisq)
self.meanChisq = np.mean(chisq)
self.stdChisq = np.std(chisq)
def calcChiSq_multirad(self,apertureRadiusIndex):
"""
Calculate the :math:`$\chi^2$` for the fluxes of each comparison star and the fluxes of the target star. This
metric can be used to suggest which comparison stars have similar overall trends to the target star.
"""
for star in self.allStarsDict:
print self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getFluxes_multirad(star,apertureRadiusIndex)
self.allStarsDict[star]['chisq'][apertureRadiusIndex] = mathMethods.chiSquared(self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getFluxes_multirad(star,apertureRadiusIndex))
chisq = []
for star in self.allStarsDict:
chisq.append(self.allStarsDict[star]['chisq'][apertureRadiusIndex])
self.chisq = np.array(chisq)
self.meanChisq = np.mean(chisq)
self.stdChisq = np.std(chisq)
def calcMeanComparison_multirad(self,ccdGain=1):
"""
Take the regression-weighted mean of some of the comparison stars
to produce one comparison star flux to compare to the target to
produce a light curve.
The comparison stars used are those whose :math:`$\chi^2$`s calculated by
`calcChiSq()` are less than :math:`$2\sigma$` away from the other :math:`$\chi^2$`s.
This condition removes outlier comparison stars, which can be caused by intrinsic
variability, tracking inaccuracies, or other effects.
"""
self.meanComparisonStars = []
self.meanComparisonStarErrors = []
self.comparisonStarWeights = []
for apertureRadiusIndex in range(len(self.apertureRadii)):
## Check whether chi-squared has been calculated already. If not, compute it.
chisq = []
for star in self.allStarsDict: chisq.append(self.allStarsDict[star]['chisq'])
chisq = np.array(chisq)
#if all(chisq == 0): self.calcChiSq_multirad(apertureRadiusIndex)
if (chisq==0).all(): self.calcChiSq_multirad(apertureRadiusIndex)
## Begin regression technique
numCompStars = len(self.allStarsDict) - 1
targetFullLength = len(self.getScaledFluxes_multirad(self.targetKey,apertureRadiusIndex))
print "Aperture rad:", apertureRadiusIndex
print "Target raw flux:",self.getFluxes_multirad(self.targetKey,apertureRadiusIndex)
print "Target scaled flux:",self.getScaledFluxes_multirad(self.targetKey,apertureRadiusIndex)
target = self.getFluxes_multirad(self.targetKey,apertureRadiusIndex)[self.outOfTransit()]
compStars = np.zeros([targetFullLength,numCompStars])
compStarsOOT = np.zeros([len(target),numCompStars])
compErrors = np.copy(compStars)
columnCounter = 0
acceptedCompStarKeys = []
compStarKeys = []
for star in self.allStarsDict:
if star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) < 2*self.stdChisq).any():
compStars[:,columnCounter] = self.getScaledFluxes_multirad(star,apertureRadiusIndex).astype(np.float64)
compStarsOOT[:,columnCounter] = self.getScaledFluxes_multirad(star,apertureRadiusIndex)[self.outOfTransit()].astype(np.float64)
compErrors[:,columnCounter] = self.getScaledErrors_multirad(star,apertureRadiusIndex).astype(np.float64)
compStarKeys.append(int(star))
columnCounter += 1
elif star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) > 2*self.stdChisq):
print 'Star '+str(star)+' excluded from regression'
compStarKeys.append(int(star))
columnCounter += 1
initP = np.zeros([numCompStars])+ 1./numCompStars
def errfunc(p,target):
if all(p >=0.0): return np.dot(p,compStarsOOT.T) - target ## Find only positive coefficients
#return np.dot(p,compStarsOOT.T) - target
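# optimize.leastsq below searches for weights that minimize the residuals between the
# weighted sum of out-of-transit comparison fluxes and the target's out-of-transit flux;
# errfunc only returns residuals when every weight is non-negative, which is how the
# fit is restricted to positive coefficients.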
bestFitP = optimize.leastsq(errfunc,initP[:],args=(target.astype(np.float64)),maxfev=10000000,epsfcn=np.finfo(np.float32).eps)[0]
print '\nBest fit regression coefficients:',bestFitP
print 'Default weight:',1./numCompStars
self.comparisonStarWeights_i = np.vstack([compStarKeys,bestFitP])
self.meanComparisonStar = np.dot(bestFitP,compStars.T)
self.meanComparisonStarError = np.sqrt(np.dot(bestFitP**2,compErrors.T**2))
self.meanComparisonStars.append(self.meanComparisonStar)
self.meanComparisonStarErrors.append(self.meanComparisonStarError)
self.comparisonStarWeights.append(self.comparisonStarWeights_i)
return self.meanComparisonStars, self.meanComparisonStarErrors
def getAllChiSq(self):
"""
Return :math:`$\chi^2$`s for all stars
"""
return self.chisq
def outOfTransit(self):
"""
Boolean array where `True` are the times in `getTimes()` that are
before ingress or after egress.
Returns
-------
List of bools
"""
return (self.getTimes() < self.ingress) + (self.getTimes() > self.egress)
def calcMeanComparison(self,ccdGain=1):
"""
Take the regression-weighted mean of some of the comparison stars
to produce one comparison star flux to compare to the target to
produce a light curve.
The comparison stars used are those whose chi-squareds calculated by
self.calcChiSq() are less than 2*sigma away from the other chi-squareds.
This condition removes outliers.
"""
## Check whether chi-squared has been calculated already. If not, compute it.
chisq = []
for star in self.allStarsDict: chisq.append(self.allStarsDict[star]['chisq'])
chisq = np.array(chisq)
if all(chisq == 0): self.calcChiSq()
## Begin regression technique
numCompStars = len(self.allStarsDict) - 1
targetFullLength = len(self.getScaledFluxes(self.targetKey))
target = self.getFluxes(self.targetKey)[self.outOfTransit()]
compStars = np.zeros([targetFullLength,numCompStars])
compStarsOOT = np.zeros([len(target),numCompStars])
compErrors = np.copy(compStars)
columnCounter = 0
compStarKeys = []
for star in self.allStarsDict:
if star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) < 2*self.stdChisq):
compStars[:,columnCounter] = self.getScaledFluxes(star).astype(np.float64)
compStarsOOT[:,columnCounter] = self.getScaledFluxes(star)[self.outOfTransit()].astype(np.float64)
compErrors[:,columnCounter] = self.getScaledErrors(star).astype(np.float64)
compStarKeys.append(int(star))
columnCounter += 1
elif star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) > 2*self.stdChisq):
print 'Star '+str(star)+' excluded from regression'
compStarKeys.append(int(star))
columnCounter += 1
initP = np.zeros([numCompStars])+ 1./numCompStars
def errfunc(p,target):
if all(p >=0.0): return np.dot(p,compStarsOOT.T) - target ## Find only positive coefficients
#return np.dot(p,compStarsOOT.T) - target
bestFitP = optimize.leastsq(errfunc,initP[:],args=(target.astype(np.float64)),maxfev=10000000,epsfcn=np.finfo(np.float32).eps)[0]
print '\nBest fit regression coefficients:',bestFitP
print 'Default weight:',1./numCompStars
self.comparisonStarWeights = np.vstack([compStarKeys,bestFitP])
self.meanComparisonStar = np.dot(bestFitP,compStars.T)
self.meanComparisonStarError = np.sqrt(np.dot(bestFitP**2,compErrors.T**2))
return self.meanComparisonStar, self.meanComparisonStarError
def computeLightCurve(self,meanComparisonStar,meanComparisonStarError):
'''
Divide the target star flux by the mean comparison star to yield a light curve,
save the light curve into the dataBank object.
INPUTS: meanComparisonStar - The fluxes of the (one) mean comparison star
RETURNS: self.lightCurve - The target star divided by the mean comparison
star, i.e., the light curve.
'''
self.lightCurve = self.getFluxes(self.targetKey)/meanComparisonStar
self.lightCurveError = np.sqrt(self.lightCurve**2 * ( (self.getErrors(self.targetKey)/self.getFluxes(self.targetKey))**2 + (meanComparisonStarError/meanComparisonStar)**2 ))
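# error propagated as for a ratio: sigma_LC = LC * sqrt((sigma_target/F_target)**2 + (sigma_comp/F_comp)**2)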
return self.lightCurve, self.lightCurveError
def computeLightCurve_multirad(self,meanComparisonStars,meanComparisonStarErrors):
'''
Divide the target star flux by the mean comparison star to yield a light curve,
save the light curve into the `dataBank` object.
Parameters
----------
meanComparisonStar : list
The fluxes of the (one) mean comparison star
Returns
-------
self.lightCurves:
The fluxes of the target star divided by the fluxes of the mean comparison
star, i.e., the light curve
self.lightCurveErrors:
The propagated errors on each relative flux in `self.lightCurves`
'''
self.lightCurves = []
self.lightCurveErrors = []
for apertureRadiusIndex in range(len(self.apertureRadii)):
lightCurve = self.getFluxes_multirad(self.targetKey,apertureRadiusIndex)/meanComparisonStars[apertureRadiusIndex]
self.lightCurves.append(lightCurve)
self.lightCurveErrors.append(np.sqrt(lightCurve**2 * ( (self.getErrors_multirad(self.targetKey,apertureRadiusIndex)/self.getFluxes_multirad(self.targetKey,apertureRadiusIndex))**2 +\
(meanComparisonStarErrors[apertureRadiusIndex]/meanComparisonStars[apertureRadiusIndex])**2 )))
return self.lightCurves, self.lightCurveErrors
def getPhotonNoise(self):
'''
Calculate photon noise using the lightCurve and the meanComparisonStar
RETURNS: self.photonNoise - The estimated photon noise limit
'''
self.photonNoise = self.lightCurve*self.meanComparisonStarError
return self.photonNoise
def parseInit(self, initParFilePath=None):
"""
Parses `init.par`, a plain text file that contains all of the running parameters
that control the `differentialPhotometry.py` script. `init.par` is written by
the OSCAAR GUI or can be edited directly by the user.
Parameters
----------
initParFilePath : str
Optional full path to the init.par file to use for the data
"""
if initParFilePath is None:
init = open(os.path.join(
os.path.dirname(os.path.abspath(oscaar.__file__)),
'init.par'), 'r').read().splitlines()
else:
if os.path.exists(initParFilePath):
init = open(os.path.abspath(initParFilePath), 'r').read().splitlines()
else:
raise ValueError, (
"PAR file {0} cannot be found.".format(initParFilePath))
for line in init:
if len(line.split()) > 1:
inline = line.split(':', 1)
name = inline[0].strip()
value = str(inline[1].strip())
list = [("Path to Master-Flat Frame", "flatPath"),
("Path to Regions File", "regPaths"),
("Ingress", "ingress"), ("Egress", "egress"),
("Radius", "apertureRadius"), ("Tracking Zoom", "trackingZoom"),
("CCD Gain", "ccdGain"), ("Plot Tracking", "trackPlots"),
("Plot Photometry", "photPlots"), ("Smoothing Constant", "smoothConst"),
("Output Path","outputPath"), ("Path to Dark Frames", "darksPath"),
("Path to Data Images", "imagesPaths"), ("Exposure Time Keyword", "timeKeyword")]
for string,save in list:
if string == name:
#if name == "Smoothing Constant" or name == "Radius" or name == "Tracking Zoom" or name == "CCD Gain":
if name == "Smoothing Constant" or name == "Tracking Zoom" or name == "CCD Gain":
self.dict[save] = float(value)
elif name == "Ingress" or name == "Egress":
self.dict[save] = mathMethods.ut2jd(value)
elif name == "Plot Photometry" or name == "Plot Tracking":
if value == "on":
self.dict[save] = True
else:
self.dict[save] = False
elif name == "Path to Dark Frames" or name == "Path to Data Images":
value = inline[1].strip()
if len(glob(value)) > 0:
self.dict[save] = np.sort(glob(value))
elif value == "":
self.dict[save] = ""
else:
tempArr = []
for path in str(inline[1]).split(','):
path = path.strip()
path = os.path.join(oscaarpathplus,os.path.abspath(path))
tempArr.append(path)
self.dict[save] = np.sort(tempArr)
elif name == "Radius":
if len(value.split(',')) == 3:
## If multiple aperture radii are requested by dictating the range, enumerate the range:
apertureRadiusMin, apertureRadiusMax, apertureRadiusStep = map(float,value.split(','))
if (apertureRadiusMax-apertureRadiusMin) % apertureRadiusStep == 0:
apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax+apertureRadiusStep, apertureRadiusStep)
else:
apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax, apertureRadiusStep)
self.dict[save] = apertureRadii
elif len(value.split(',')) == 1:
## If only one aperture radius is requested, make a list with only that one element
self.dict[save] = [float(value)]
else:
self.dict[save] = [float(i) for i in value.split(',')]
elif name == "Output Path":
self.outputPath = os.path.join(oscaarpathplus,os.path.abspath(value))
else:
self.dict[save] = value
def parseRegionsFile(self,regPath):
"""
Parses the regions files (.REG) created by DS9. These files are written in plain text, where
each circular region's centroid and radius are logged in the form "circle(`x-centroid`,`y-centroid`,`radius`)".
This method uses regular expressions to parse out the centroids.
Parameters
----------
regPath : string
Path to the regions file to read
Returns
-------
init_x_list : list
Initial estimates for the x-centroids
init_y_list : list
Initial estimates for the y-centroids
"""
regionsData = open(regPath,'r').read().splitlines()
init_x_list = []
init_y_list = []
for i in range(0,len(regionsData)):
if regionsData[i][0:6] == 'circle':
y,x = re.split("\,",re.split("\(",regionsData[i])[1])[0:2]
init_y_list.append(float(y))
init_x_list.append(float(x))
return init_x_list,init_y_list
def parseRawRegionsList(self,rawRegionsList):
"""
Split up the `rawRegionsList`, which should be in the format:
<first regions file>,<reference FITS file for the first regs file>;<second regions file>,
<reference FITS file for the second regs file>; ...
into a list of regions files and a list of FITS reference files.
"""
regionsFiles = []
refFITSFiles = []
for pair in rawRegionsList.split(';'):
if len(pair.split(",")) == 2:
regionsFile, refFITSFile = pair.split(',')
regionsFiles.append(regionsFile)
refFITSFiles.append(refFITSFile)
return regionsFiles, refFITSFiles
def plot(self,pointsPerBin=10):
"""
Produce a plot of the light curve, show it. Over-plot 10-point median binning
of the light curve.
Parameters
----------
pointsPerBin : int, optional (default=10)
Integer number of points to accumulate per bin.
"""
plt.close()
times = self.getTimes()
meanComparisonStar, meanComparisonStarError = self.calcMeanComparison(ccdGain = self.ccdGain)
lightCurve, lightCurveErr = self.computeLightCurve(meanComparisonStar, meanComparisonStarError)
binnedTime, binnedFlux, binnedStd = mathMethods.medianBin(times,lightCurve,pointsPerBin)
fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
fig.canvas.set_window_title('OSCAAR')
axis = fig.add_subplot(111)
def format_coord(x, y):
'''Function to give data value on mouse over plot.'''
return 'JD=%1.5f, Flux=%1.4f' % (x, y)
axis.format_coord = format_coord
axis.errorbar(times,lightCurve,yerr=lightCurveErr,fmt='k.',ecolor='gray')
axis.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
axis.set_title('Light Curve')
axis.set_xlabel('Time (JD)')
axis.set_ylabel('Relative Flux')
plt.ioff()
plt.show()
def plotLightCurve(self,pointsPerBin=10,apertureRadiusIndex=0):
"""
Produce a plot of the light curve, show it. Over-plot 10-point median binning
of the light curve.
Parameters
----------
pointsPerBin : int, optional (default=10)
Integer number of points to accumulate per bin.
apertureRadiusIndex : int, optional (default=0)
Index of the aperture radius list corresponding to the aperture radius
from which to produce the plot.
"""
binnedTime, binnedFlux, binnedStd = mathMethods.medianBin(self.times,self.lightCurves[apertureRadiusIndex],pointsPerBin)
fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
fig.canvas.set_window_title('OSCAAR')
axis = fig.add_subplot(111)
def format_coord(x, y):
'''Function to give data value on mouse over plot.'''
return 'JD=%1.5f, Flux=%1.4f' % (x, y)
axis.format_coord = format_coord
axis.errorbar(self.times,self.lightCurves[apertureRadiusIndex],yerr=self.lightCurveErrors[apertureRadiusIndex],fmt='k.',ecolor='gray')
axis.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
axis.set_title(('Light curve for aperture radius %s' % self.apertureRadii[apertureRadiusIndex]))
axis.set_xlabel('Time (JD)')
axis.set_ylabel('Relative Flux')
plt.ioff()
plt.show()
def plotRawFluxes(self,apertureRadiusIndex=0,pointsPerBin=10):
"""
Plot all raw flux time series for a particular aperture radius,
for each comparison star.
Parameters
----------
pointsPerBin : int, optional (default=10)
Integer number of points to accumulate per bin.
apertureRadiusIndex : int, optional (default=0)
Index of the aperture radius list corresponding to the aperture radius
from which to produce the plot.
"""
plt.ion()
fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
fig.canvas.set_window_title('OSCAAR')
axis = fig.add_subplot(111)
def format_coord(x, y):
'''Function to give data value on mouse over plot.'''
return 'JD=%1.5f, Flux=%1.4f' % (x, y)
axis.format_coord = format_coord
for star in self.allStarsDict:
axis.errorbar(self.times,self.allStarsDict[star]['rawFlux'][apertureRadiusIndex],yerr=self.allStarsDict[star]['rawError'][apertureRadiusIndex],fmt='o')
axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
axis.set_title(('Raw fluxes for aperture radius %s' % self.apertureRadii[apertureRadiusIndex]))
axis.set_xlabel('Time (JD)')
axis.set_ylabel('Counts')
plt.ioff()
plt.show()
def plotScaledFluxes(self,apertureRadiusIndex=0,pointsPerBin=10):
"""
Plot all scaled flux time series for a particular aperture radius,
for each comparison star.
Parameters
----------
pointsPerBin : int, optional (default=10)
Integer number of points to accumulate per bin.
apertureRadiusIndex : int, optional (default=0)
Index of the aperture radius list corresponding to the aperture radius
from which to produce the plot.
"""
plt.ion()
fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
fig.canvas.set_window_title('OSCAAR')
axis = fig.add_subplot(111)
def format_coord(x, y):
'''Function to give data value on mouse over plot.'''
return 'JD=%1.5f, Flux=%1.4f' % (x, y)
axis.format_coord = format_coord
for star in self.allStarsDict:
axis.errorbar(self.times,self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex],yerr=self.allStarsDict[star]['scaledError'][apertureRadiusIndex],fmt='o')
axis.axvline(ymin=0,ymax=1,x=self.ingress,color='k',ls=':')
axis.axvline(ymin=0,ymax=1,x=self.egress,color='k',ls=':')
axis.set_title(('Scaled fluxes for aperture radius: %s' % self.apertureRadii[apertureRadiusIndex]))
axis.set_xlabel('Time (JD)')
axis.set_ylabel('Counts')
plt.ioff()
plt.show()
def plotCentroidsTrace(self,pointsPerBin=10):
"""
Plot the centroid positions of each star. The plot is in (`x`,`y`) pixel
coordinates to visualize the physical image drift (this is not a plot as a
function of time).
Parameters
----------
pointsPerBin : int, optional (default=10)
Integer number of points to accumulate per bin.
"""
fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
fig.canvas.set_window_title('OSCAAR')
axis = fig.add_subplot(111)
def format_coord(x, y):
'''Function to give data value on mouse over plot.'''
return 'JD=%1.5f, Flux=%1.4f' % (x, y)
axis.format_coord = format_coord
for star in self.allStarsDict:
axis.plot(self.allStarsDict[star]['y-pos'],self.allStarsDict[star]['x-pos'])
axis.set_title('Tracing Stellar Centroids')
axis.set_xlabel('X')
axis.set_ylabel('Y')
plt.ioff()
plt.show()
def plotComparisonWeightings(self, apertureRadiusIndex=0):
"""
Plot histograms visualizing the relative weightings of the comparison
stars used to produce the "mean comparison star", from which the
light curve is calculated.
Parameters
----------
apertureRadiusIndex : int, optional (default=0)
Index of the aperture radius list corresponding to the aperture radius
from which to produce the plot.
"""
plt.ion()
weights = self.comparisonStarWeights[apertureRadiusIndex]
weights = np.sort(weights,axis=1)
width = 0.5
indices = weights[0,:]
coefficients = weights[1,:]
fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
fig.canvas.set_window_title('OSCAAR')
ax = fig.add_subplot(111)
ax.set_xlim([0,len(indices)+1])
ax.set_xticks(indices+width/2)
ax.set_xticklabels(["Star "+str(i) for i in range(len(indices))])
ax.set_xlabel('Comparison Star')
ax.set_ylabel('Normalized Weighting')
ax.set_title('Comparison Star Weights into the Composite Comparison Star for aperture radius %s' \
% self.apertureRadii[apertureRadiusIndex])
ax.axhline(xmin=0,xmax=1,y=1.0/len(indices),linestyle=':',color='k')
ax.bar(indices,coefficients,width,color='w')
plt.ioff()
plt.show()
def updateMCMC(self,bestp,allparams,acceptanceRate,dataBankPath,uncertainties):
"""
Assigns variables within the dataBank object for the results of an MCMC run.
Parameters
----------
bestp : list
Best-fit parameters from the MCMC run. The list elements correspond to [<ratio of planetary radius
to stellar radius>,<ratio of semi-major axis to stellar radius>,<inclination>,<mid-transit time>].
allparams : 2D matrix
This matrix represents the many "states", "trails" or "links in the chain" that are accepted and saved
throughout the Metropolis-Hastings process in the MCMC scripts. From allparams we can calculate the
uncertainties on each best-fit parameter.
acceptanceRate : float
The final acceptance rate achieved by the chain; the ratio of the number of accepted states and the
number of states attempted
dataBankPath : string
Path to the dataBank object pickle (aka "OSCAAR pkl") to update
uncertainties : list of lists
:math:`$\pm 1\sigma$` uncertainties on each of the best-fit parameters in `bestp`
"""
self.MCMC_bestp = bestp
self.MCMC_allparams = allparams
self.MCMC_acceptanceRate = acceptanceRate
self.dataBankPath = dataBankPath
self.MCMC_uncertainties = uncertainties
def uncertaintyString(self):
"""
Returns
-------
savestring : string
            A human-readable summary of the MCMC results, listing the best-fit
            parameters and their :math:`\pm 1\sigma` uncertainties
"""
savestring = 'MCMC Best Fit Parameters And One-Sigma Uncertainties\n----------------------------------------------------\n\n'
labels = ['Rp/Rs','a/Rs','Inclination','Mid-transit time']
for i in range(len(labels)):
savestring += '%s:\t%s\t +%s / -%s \n' % (labels[i],self.MCMC_bestp[i],self.MCMC_uncertainties[i][0],self.MCMC_uncertainties[i][1])
return savestring
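    # Example of the format returned by uncertaintyString(); the fields are
    # tab-separated and the numerical values below are purely hypothetical:
    #
    #     MCMC Best Fit Parameters And One-Sigma Uncertainties
    #     ----------------------------------------------------
    #
    #     Rp/Rs:    0.118    +0.003 / -0.002
    #     a/Rs:    14.2    +0.4 / -0.5
    #     Inclination:    88.9    +0.6 / -0.7
    #     Mid-transit time:    2455000.123    +0.0004 / -0.0005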
def czechETDstring(self,apertureRadiusIndex):
"""
        Returns a string containing the tab-delimited light curve data formatted
        for submission to the *Czech Astronomical Society's Exoplanet Transit
        Database* (ETD): http://var2.astro.cz/ETD/protocol.php
        Parameters
        ----------
        apertureRadiusIndex : int
            Index of the aperture radius to use for the light curve fluxes
            and errors.
"""
N_measurements = len(self.lightCurves[apertureRadiusIndex])
outputString = ''
for i in xrange(N_measurements):
outputString += '\t'.join(map(str,[self.times[i],self.lightCurves[apertureRadiusIndex][i],\
self.lightCurveErrors[apertureRadiusIndex][i]]))
outputString += '\n'
return outputString
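    # Hedged usage sketch for czechETDstring (illustrative only; `data` and the
    # output file name are hypothetical):
    #
    #     etdText = data.czechETDstring(apertureRadiusIndex=0)
    #     open('etd_submission.txt', 'w').write(etdText)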
# def plotMCMC(self):
# bestp = self.MCMC_bestp
# allparams = self.MCMC_allparams
# x = self.times
# y = self.lightCurve
# sigma_y = self.lightCurveError
#
# ##############################
# # Prepare figures
# fig = plt.figure()
# ax1 = fig.add_subplot(331)
# ax2 = fig.add_subplot(332)
# ax3 = fig.add_subplot(333)
# ax4 = fig.add_subplot(334)
# ax5 = fig.add_subplot(335)
# ax6 = fig.add_subplot(336)
# ax7 = fig.add_subplot(337)
# ax8 = fig.add_subplot(338)
# ax9 = fig.add_subplot(339)
# yfit = occult4params(x,bestp)
# ax1.errorbar(x,y,yerr=sigma_y,fmt='o-')
# ax1.plot(x,yfit,'r')
# ax1.set_title("Fit with MCMC")
#
# ##############################
# # Plot traces and histograms of mcmc params
# p = allparams[0,:]
# ap = allparams[1,:]
# i = allparams[2,:]
# t0 = allparams[3,:]
# abscissa = np.arange(len(allparams[0,:])) ## Make x-axis for trace plots
# burnFraction = 0.20 ## "burn" or ignore the first 20% of the chains
#
# ax2.plot(abscissa,p,'k.')
# ax2.set_title('p trace')
# ax2.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# ax3.plot(abscissa,ap,'k.')
# ax3.set_title('ap trace')
# ax3.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# ax4.plot(abscissa,i,'k.')
# ax4.set_title('i trace')
# ax4.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# ax5.plot(abscissa,t0,'k.')
# ax5.set_title('t0 trace')
# ax5.axvline(ymin=0,ymax=1,x=burnFraction*len(abscissa),linestyle=':')
#
# def histplot(parameter,axis,title,bestFitParameter):
# postburn = parameter[burnFraction*len(parameter):len(parameter)] ## Burn beginning of chain
# Nbins = 15 ## Plot histograms with 15 bins
# n, bins, patches = axis.hist(postburn, Nbins, normed=0, facecolor='white') ## Generate histogram
# plus,minus = oscaar.fitting.get_uncertainties(postburn,bestFitParameter) ## Calculate uncertainties on best fit parameter
# axis.axvline(ymin=0,ymax=1,x=bestFitParameter+plus,ls=':',color='r') ## Plot vertical lines representing uncertainties
# axis.axvline(ymin=0,ymax=1,x=bestFitParameter-minus,ls=':',color='r')
# axis.set_title(title)
# ## Plot the histograms
# histplot(p,ax6,'p',bestp[0])
# histplot(ap,ax7,'ap',bestp[1])
# histplot(i,ax8,'i',bestp[2])
# histplot(t0,ax9,'t0',bestp[3])
#
# plt.savefig("mcmc_results.png",bbox_inches='tight') ## Save plot
# plt.show()
def plotLightCurve_multirad_output(self):
plt.show()
def plotLightCurve_multirad(self,pointsPerBin=10):
for apertureRadiusIndex in range(len(self.apertureRadii)):
meanTimeInt = int(np.rint(np.mean(self.times)))
offsetTimes = self.times - meanTimeInt
binnedTime, binnedFlux, binnedStd = mathMethods.medianBin(offsetTimes,self.lightCurves[apertureRadiusIndex],pointsPerBin)
fig = plt.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
fig.canvas.set_window_title('OSCAAR')
axis = fig.add_subplot(111)
def format_coord(x, y):
'''Function to give data value on mouse over plot.'''
return 'JD=%1.5f, Flux=%1.4f' % (meanTimeInt+x, y)
axis.format_coord = format_coord
axis.errorbar(offsetTimes,self.lightCurves[apertureRadiusIndex],yerr=self.lightCurveErrors[apertureRadiusIndex],fmt='k.',ecolor='gray')
axis.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
axis.axvline(ymin=0,ymax=1,x=self.ingress-meanTimeInt,color='k',ls=':')
axis.axvline(ymin=0,ymax=1,x=self.egress-meanTimeInt,color='k',ls=':')
axis.set_title('Light curve for aperture radius: %s' % self.apertureRadii[apertureRadiusIndex])
axis.set_xlabel(('Time - %i (JD)' % meanTimeInt))
axis.set_ylabel('Relative Flux')
plt.ioff()
self.plotLightCurve_multirad_output()
| mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/decomposition/tests/test_online_lda.py | 1 | 13327 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
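    # The resulting X is a 9 x 9 block-diagonal document-word count matrix:
    # each group of three documents only uses its own group of three words,
    # so the three topics are easy for LDA to recover.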
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative values.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
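    # score() returns an (approximate) log likelihood; perplexity is related to
    # it by perplexity = exp(-score / total_word_count), where total_word_count
    # is the sum of all entries in the document-word matrix.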
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| mit |
DerThorsten/boring_spaghetti | exp/play.py | 1 | 4464 | import vigra
import opengm
import numpy
import matplotlib.pyplot as plt
img = vigra.readImage('/home/tbeier/datasets/BSR/BSDS500/data/images/train/56028.jpg')
img = vigra.readImage('/home/tbeier/datasets/BSR/BSDS500/data/images/train/118035.jpg')
img = img[::1, ::1,:]
grad = vigra.filters.gaussianGradientMagnitude(vigra.colors.transform_Lab2RGB(img), 1.5).squeeze()
grad -= grad.min()
grad /= grad.max()
grad2 = grad.copy()
#grad2[numpy.where(grad2<0.3)] = 0
grad2 = numpy.exp(1.5*grad2)-1.0
show = True
if show:
imgplot = plt.imshow(grad2.swapaxes(0,1))
plt.colorbar()
plt.show()
expGrad = numpy.exp(-2.1*grad)
w = 2*expGrad -1.0
w-=w.min()*2.5
if show:
imgplot = plt.imshow(w.swapaxes(0,1))
plt.colorbar()
plt.show()
gm = opengm.adder.gridPatchAffinityGm(grad2.astype(numpy.float64), w.astype(numpy.float64), 40, 3 ,20, 0.01)
print gm
verbose = True
useQpbo = False
useCgc = False
useWs = False
with opengm.Timer("with new method"):
fusionParam = opengm.InfParam(fusionSolver = 'cgc', planar=False)
arg = None
if useQpbo:
infParam = opengm.InfParam(
numStopIt=0,
numIt=200,
generator='qpboBased',
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
# inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=1,multiline=False)
if verbose:
inf.infer(visitor)
else:
inf.infer()
inf.infer()
arg = inf.arg()
proposalParam = opengm.InfParam(
randomizer = opengm.weightRandomizer(noiseType='normalAdd',noiseParam=1.700000001, ignoreSeed=False),
stopWeight=0.0,
reduction=0.999,
setCutToZero=False
)
infParam = opengm.InfParam(
numStopIt=20,
numIt=200,
generator='randomizedHierarchicalClustering',
proposalParam=proposalParam,
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
if arg is not None:
inf.setStartingPoint(arg)
    # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=1,multiline=False)
if verbose:
inf.infer(visitor)
else:
inf.infer()
arg = inf.arg()
if useWs:
print "ws"
proposalParam = opengm.InfParam(
randomizer = opengm.weightRandomizer(noiseType='normalAdd',noiseParam=1.100000001,ignoreSeed=False),
seedFraction = 0.005
)
infParam = opengm.InfParam(
numStopIt=20,
numIt=10,
generator='randomizedWatershed',
proposalParam=proposalParam,
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
if arg is not None:
inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=1,multiline=False)
if verbose:
inf.infer(visitor)
else:
inf.infer()
arg = inf.arg()
if useQpbo:
infParam = opengm.InfParam(
numStopIt=0,
numIt=40,
generator='qpboBased',
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=10)
if useCgc:
print "cgc"
infParam = opengm.InfParam(
planar=False,
startFromThreshold=False,
doCutMove = False,
doGlueCutMove = True,
maxIterations = 1
)
inf=opengm.inference.Cgc(gm, parameter=infParam)
if arg is not None:
inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=10)
if verbose:
inf.infer(visitor)
else:
inf.infer()
arg = inf.arg()
print gm.evaluate(arg)
argImg = arg.reshape(img.shape[0:2])
import matplotlib,numpy
import pylab
# A random colormap for matplotlib
cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( argImg.max()+1,3))
pylab.imshow ( argImg.swapaxes(0,1), cmap = cmap)
pylab.show()
| mit |
jmontoyam/mne-python | mne/io/array/tests/test_array.py | 2 | 4132 | # Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import matplotlib
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from nose.tools import assert_equal, assert_raises, assert_true
from mne import find_events, Epochs, pick_types
from mne.io import read_raw_fif
from mne.io.array import RawArray
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.meas_info import create_info, _kind_dict
from mne.utils import slow_test, requires_version, run_tests_if_main
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests might throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
fif_fname = op.join(base_dir, 'test_raw.fif')
@slow_test
@requires_version('scipy', '0.12')
def test_array_raw():
"""Test creating raw from array
"""
import matplotlib.pyplot as plt
# creating
raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(2, 5)
data, times = raw[:, :]
sfreq = raw.info['sfreq']
ch_names = [(ch[4:] if 'STI' not in ch else ch)
for ch in raw.info['ch_names']] # change them, why not
# del raw
types = list()
for ci in range(101):
types.extend(('grad', 'grad', 'mag'))
types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels
types.extend(['stim'] * 9)
types.extend(['eeg'] * 60)
# wrong length
assert_raises(ValueError, create_info, ch_names, sfreq, types)
# bad entry
types.append('foo')
assert_raises(KeyError, create_info, ch_names, sfreq, types)
types[-1] = 'eog'
# default type
info = create_info(ch_names, sfreq)
assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
# use real types
info = create_info(ch_names, sfreq, types)
raw2 = _test_raw_reader(RawArray, test_preloading=False,
data=data, info=info, first_samp=2 * data.shape[1])
data2, times2 = raw2[:, :]
assert_allclose(data, data2)
assert_allclose(times, times2)
assert_true('RawArray' in repr(raw2))
assert_raises(TypeError, RawArray, info, data)
# filtering
picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
assert_equal(len(picks), 4)
raw_lp = raw2.copy()
raw_lp.filter(None, 4.0, h_trans_bandwidth=4.,
filter_length='auto', picks=picks, n_jobs=2, phase='zero',
fir_window='hamming')
raw_hp = raw2.copy()
raw_hp.filter(16.0, None, l_trans_bandwidth=4.,
filter_length='auto', picks=picks, n_jobs=2, phase='zero',
fir_window='hamming')
raw_bp = raw2.copy()
raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4.,
h_trans_bandwidth=4., filter_length='auto', picks=picks,
phase='zero', fir_window='hamming')
raw_bs = raw2.copy()
raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4.,
filter_length='auto', picks=picks, n_jobs=2, phase='zero',
fir_window='hamming')
data, _ = raw2[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
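    # The low-, band- and high-pass components should approximately sum back to
    # the original signal, as should the band-pass plus band-stop components,
    # since the chosen cutoffs and transition bands partition the spectrum.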
sig_dec = 15
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
# plotting
raw2.plot()
raw2.plot_psd()
plt.close('all')
# epoching
events = find_events(raw2, stim_channel='STI 014')
events[:, 2] = 1
assert_true(len(events) > 2)
epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True,
add_eeg_ref=False)
epochs.plot_drop_log()
epochs.plot()
evoked = epochs.average()
evoked.plot()
assert_equal(evoked.nave, len(events) - 1)
plt.close('all')
# complex data
rng = np.random.RandomState(0)
data = rng.randn(1, 100) + 1j * rng.randn(1, 100)
raw = RawArray(data, create_info(1, 1000., 'eeg'))
assert_allclose(raw._data, data)
run_tests_if_main()
| bsd-3-clause |
Carmezim/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 14 | 46097 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return const, const, control_flow_ops.group(train_op_1, training_op_2)
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, control_flow_ops.no_op()
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
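  # Builds trivial ModelFnOps whose outputs depend on equality assertions, so
  # the graph fails at run time if the estimator passes features/labels that
  # differ from the expected ones.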
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator internals to copy the
    # array if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertSameElements(
['bogus_lookup', 'feature'],
graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS))
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
goirijo/thermoplotting | old/testing/testing.py | 1 | 1292 | import numpy as np
import xray
import glob
import re
import thermoplotting as tp
import matplotlib.pyplot as plt
from scipy import integrate
from collections import OrderedDict
headerdict=OrderedDict([("formation_energy","U"),
("Ni","N0"),
("Al","N1"),
("generalized_enthalpy","phi"),
("temperature","T"),
("beta","b"),
("mu_Ni","mu0"),
("mu_Al","mu1")
])
controlledvar=["mu0","mu1","T"]
#Integrate heating run from low T to high T
heatingnames=glob.glob("./dataset/heating_nuke_0/mu-*/tabulated_averages.txt")
heatingdata=tp.ThermoArray(heatingnames, ["mu0","mu1","T"], headerdict)
heatingphidata=heatingdata.data_view("phi")
heatingbetadata=heatingdata.data_view("b")
heatingPHIref=heatingphidata[0,:,:]
heatingPHIdata=tp.grandcanonical.integrate.beta(heatingbetadata, heatingphidata, heatingPHIref, 0)
heatingdata=heatingdata.push_back(heatingPHIdata,"omega")
fig=plt.figure()
ax=fig.add_subplot(311)
y1=heatingPHIdata[:,0,0]
ax.scatter(heatingdata.data_view("T")[:,0,0], y1)
ax=fig.add_subplot(312)
y2=heatingdata.data_view("omega")[:,0,0]
ax.scatter(heatingdata.data_view("T")[:,0,0],y2)
ax=fig.add_subplot(313)
y3=y2-y1
ax.scatter(heatingdata.data_view("T")[:,0,0],y3)
plt.show()
| mit |
Winterflower/mdf | mdf/viewer/panels/plotpanel.py | 3 | 2675 | """
Panel for showing graphs
"""
import wx
import numpy as np
# force matplotlib to use whatever wx is installed
import sys
sys.frozen = True
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
class PlotPanel(wx.Panel):
"""
The PlotPanel has a Figure and a Canvas. OnSize events simply set a
flag, and the actual resizing of the figure is triggered by an Idle event.
See:
http://www.scipy.org/Matplotlib_figure_in_a_wx_panel
"""
def __init__(self, parent, dataframes, color=None, dpi=None, **kwargs):
# initialize Panel
if 'id' not in kwargs.keys():
kwargs['id'] = wx.ID_ANY
if 'style' not in kwargs.keys():
kwargs['style'] = wx.NO_FULL_REPAINT_ON_RESIZE
wx.Panel.__init__(self, parent, **kwargs)
self.parent = parent
self.dataframes = dataframes
# initialize matplotlib stuff
self.figure = Figure(None, dpi)
self.figure.autofmt_xdate()
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.SetColor(color)
#self._SetSize((800, 600))
self.draw()
self._resizeflag = False
self.Bind(wx.EVT_IDLE, self._onIdle)
self.Bind(wx.EVT_SIZE, self._onSize)
def SetColor(self, rgbtuple=None):
"""Set figure and canvas colours to be the same."""
if rgbtuple is None:
rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()
clr = [c/255. for c in rgbtuple]
self.figure.set_facecolor( clr )
self.figure.set_edgecolor( clr )
self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))
def _onSize(self, event):
self._resizeflag = True
def _onIdle(self, evt):
if self._resizeflag:
self._resizeflag = False
self._SetSize()
def _SetSize(self, size=None):
if size is None:
size = tuple(self.GetClientSize())
self.SetSize(size)
self.canvas.SetSize(size)
self.figure.set_size_inches(float(size[0])/self.figure.get_dpi(),
float(size[1])/self.figure.get_dpi())
def draw(self):
ax = self.figure.add_subplot(111)
for dataframe in self.dataframes:
x = dataframe.index
for col in dataframe.columns:
empty = dataframe[col].count() == 0
y = dataframe[col].values if not empty else np.zeros(x.shape)
ax.plot(x, y, label=col)
try:
self.figure.autofmt_xdate()
except:
pass
ax.legend(loc="best")
ax.grid()
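# --- Hedged usage sketch (added for illustration; not part of the original mdf module) ---
# The class docstring above describes the OnSize-flag / Idle-event resize pattern.
# This sketch shows how PlotPanel might be embedded in a plain wx.Frame with a
# synthetic pandas DataFrame; the frame title and column name are hypothetical.
if __name__ == '__main__':
    import pandas as pd
    demo_df = pd.DataFrame(
        {'demo_series': np.random.randn(100).cumsum()},
        index=pd.date_range('2020-01-01', periods=100))
    app = wx.App(False)
    frame = wx.Frame(None, title='PlotPanel demo', size=(800, 600))
    panel = PlotPanel(frame, [demo_df])
    frame.Show()
    app.MainLoop()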
| mit |
vodev/vocloud-RDF | data_set_handler.py | 1 | 3705 | import io
import os
import pandas as pd
import pyfits
from astropy.io.votable import parse
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
from sklearn.cross_validation import StratifiedKFold, KFold
from sklearn.cross_validation import train_test_split
def load_header(uri):
votable = parse(uri)
table = votable.get_table_by_index(0)
values = table.array[0][1].tolist()
return [str(value) for value in values] + ["class"]
def load_set(uri, format='csv', header=None, delimiter=','):
print(str(header))
return pd.read_csv(uri, header=None, names=header, sep=None, dtype=None,
na_values='?', skipinitialspace=True)
def _to_votable(data, file_name):
votable = VOTableFile()
resource = Resource()
votable.resources.append(resource)
table = Table(votable)
resource.tables.append(table)
columns = data.columns
if data.columns[-1] == 'class':
columns = columns[:-1]
fields = [
Field(votable, name="intensities", datatype="double", arraysize='*')]
table.fields.extend(fields)
table.create_arrays(1)
table.array[0] = columns.tolist()
votable.to_xml(file_name)
def _write_csv(data, uri, header=None, separator=',', dtypes=None):
with io.open(uri, 'w', encoding='utf-8') as out:
if header is not None:
_to_votable(header,
"meta.xml")
for row in data:
rec_num = 0
for record in row:
val = record
if (dtypes is not None and 'int' in str(dtypes[rec_num])):
val = int(val)
elif (dtypes is not None and 'float' in str(dtypes[rec_num])):
val = float(val)
out.write(str(val))
if (rec_num != len(row) - 1):
out.write(separator)
rec_num += 1
out.write('\n')
def _parse_fits(uri):
fits = pyfits.open(uri, memmap=False)
dat = fits[1].data
fits.close()
return dat.tolist()
def split_train_set(uri, label=-1, ratio=0.67, sep=',', header=None):
header_num = None if not header else 0
array = pd.read_csv(uri, delimiter=sep, skipinitialspace=True,
na_values=['?'])
train, test = train_test_split(array.values, train_size=ratio)
base_name, ext = os.path.splitext(os.path.basename(uri))
directory = os.path.dirname(uri)
train_name = directory + base_name + '_train' + ext
test_name = directory + base_name + '_score' + ext
_write_csv(train, train_name, separator=sep, header=None)
_write_csv(test, test_name, separator=sep, header=None)
return (train_name, test_name)
def create_xvalidation_files(data_uri, data_conf, header, configuration,
target=None, base_folder='./result/xvalidation'):
df = pd.read_csv(data_uri, sep=None, header=None, names=header,
skipinitialspace=True, na_values=['?'])
kfold = None
labels = df[target].values
folds = configuration['folds']
if (target is not None):
kfold = StratifiedKFold(labels, folds)
else:
kfold = KFold(n=len(labels), n_folds=folds)
if not os.path.exists(base_folder):
os.makedirs(os.path.abspath(base_folder))
i = 1
uris = []
for train, test in kfold:
train_uri = base_folder + '/train_' + str(i)
test_uri = base_folder + '/test_' + str(i)
_write_csv(df.values[train], train_uri, header=header, dtypes=df.dtypes)
_write_csv(df.values[test], test_uri, header=header, dtypes=df.dtypes)
uris.append((train_uri, test_uri))
i += 1
return uris
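# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# How the functions above might be chained: split a raw CSV into train/score parts
# and then build stratified cross-validation folds on the training part. The file
# names and the 5-fold configuration are hypothetical.
def _demo_prepare_folds():
    header = load_header('meta.xml')             # VOTable with column names + 'class'
    train_uri, score_uri = split_train_set('spectra.csv', ratio=0.67)
    fold_uris = create_xvalidation_files(train_uri, None, header,
                                         {'folds': 5}, target='class')
    return score_uri, fold_uris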
| mit |
gotomypc/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
samzhang111/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
buptlishantao/stock | tushare/stock/fundamental.py | 3 | 14409 | # -*- coding:utf-8 -*-
"""
Fundamentals data interface
Created on 2015/01/18
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from tushare.stock import cons as ct
import lxml.html
from lxml import etree
import re
from pandas.compat import StringIO
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_stock_basics():
"""
        Get the basic information of companies listed in Shanghai and Shenzhen
    Return
    --------
    DataFrame
               code, stock code
               name, company name
               industry, industry segment
               area, region
               pe, price-to-earnings ratio
               outstanding, tradable share capital
               totals, total share capital (10 thousand)
               totalAssets, total assets (10 thousand)
               liquidAssets, current assets
               fixedAssets, fixed assets
               reserved, capital reserve
               reservedPerShare, capital reserve per share
               eps, earnings per share
               bvps, book value per share
               pb, price-to-book ratio
               timeToMarket, listing date
"""
request = Request(ct.ALL_STOCK_BASICS_FILE)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
df = pd.read_csv(StringIO(text), dtype={'code':'object'})
df = df.set_index('code')
return df
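# --- Hedged usage sketch (added for illustration; not part of the original tushare code) ---
# The docstring above lists the columns of the basics table. This helper simply shows
# how the returned frame might be inspected; it needs network access, and the stock
# code '600848' is only an example.
def _demo_stock_basics():
    basics = get_stock_basics()
    print(basics.columns.tolist())   # the column names documented above
    print(basics.loc['600848'])      # one row, indexed by stock code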
def get_report_data(year, quarter):
"""
        Get earnings report data
    Parameters
    --------
    year:int year, e.g. 2014
    quarter:int quarter: 1, 2, 3 or 4 (only these four quarters are accepted)
       Note: the data is scraped from the website page by page, so the speed depends on your current network connection
    Return
    --------
    DataFrame
        code, stock code
        name, company name
        eps, earnings per share
        eps_yoy, earnings per share YoY growth (%)
        bvps, book value per share
        roe, return on equity (%)
        epcf, cash flow per share (CNY)
        net_profits, net profit (10 thousand CNY)
        profits_yoy, net profit YoY growth (%)
        distrib, profit distribution plan
        report_date, release date
"""
if ct._check_input(year,quarter) is True:
ct._write_head()
df = _get_report_data(year, quarter, 1, pd.DataFrame())
if df is not None:
df = df.drop_duplicates('code')
return df
def _get_report_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.REPORT_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['fd'],
year, quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop(11, axis=1)
df.columns = ct.REPORT_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_report_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except:
pass
def get_profit_data(year, quarter):
"""
        Get profitability data
    Parameters
    --------
    year:int year, e.g. 2014
    quarter:int quarter: 1, 2, 3 or 4 (only these four quarters are accepted)
       Note: the data is scraped from the website page by page, so the speed depends on your current network connection
    Return
    --------
    DataFrame
        code, stock code
        name, company name
        roe, return on equity (%)
        net_profit_ratio, net profit margin (%)
        gross_profit_rate, gross profit margin (%)
        net_profits, net profit (10 thousand CNY)
        eps, earnings per share
        business_income, operating revenue (million CNY)
        bips, main business revenue per share (CNY)
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_profit_data(year, quarter, 1, pd.DataFrame())
if data is not None:
data = data.drop_duplicates('code')
return data
def _get_profit_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.PROFIT_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns=ct.PROFIT_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_profit_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except:
pass
def get_operation_data(year, quarter):
"""
        Get operating efficiency data
    Parameters
    --------
    year:int year, e.g. 2014
    quarter:int quarter: 1, 2, 3 or 4 (only these four quarters are accepted)
       Note: the data is scraped from the website page by page, so the speed depends on your current network connection
    Return
    --------
    DataFrame
        code, stock code
        name, company name
        arturnover, accounts receivable turnover (times)
        arturndays, accounts receivable turnover period (days)
        inventory_turnover, inventory turnover (times)
        inventory_days, inventory turnover period (days)
        currentasset_turnover, current asset turnover (times)
        currentasset_days, current asset turnover period (days)
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_operation_data(year, quarter, 1, pd.DataFrame())
if data is not None:
data = data.drop_duplicates('code')
return data
def _get_operation_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.OPERATION_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns=ct.OPERATION_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_operation_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except:
pass
def get_growth_data(year, quarter):
"""
        Get growth capability data
    Parameters
    --------
    year:int year, e.g. 2014
    quarter:int quarter: 1, 2, 3 or 4 (only these four quarters are accepted)
       Note: the data is scraped from the website page by page, so the speed depends on your current network connection
    Return
    --------
    DataFrame
        code, stock code
        name, company name
        mbrg, main business revenue growth rate (%)
        nprg, net profit growth rate (%)
        nav, net asset growth rate
        targ, total asset growth rate
        epsg, earnings per share growth rate
        seg, shareholders' equity growth rate
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_growth_data(year, quarter, 1, pd.DataFrame())
if data is not None:
data = data.drop_duplicates('code')
return data
def _get_growth_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.GROWTH_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns=ct.GROWTH_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_growth_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except:
pass
def get_debtpaying_data(year, quarter):
"""
        Get solvency (debt-paying ability) data
    Parameters
    --------
    year:int year, e.g. 2014
    quarter:int quarter: 1, 2, 3 or 4 (only these four quarters are accepted)
       Note: the data is scraped from the website page by page, so the speed depends on your current network connection
    Return
    --------
    DataFrame
        code, stock code
        name, company name
        currentratio, current ratio
        quickratio, quick ratio
        cashratio, cash ratio
        icratio, interest coverage ratio
        sheqratio, shareholders' equity ratio
        adratio, shareholders' equity growth rate
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
df = _get_debtpaying_data(year, quarter, 1, pd.DataFrame())
if df is not None:
df = df.drop_duplicates('code')
return df
def _get_debtpaying_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.DEBTPAYING_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = ct.DEBTPAYING_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_debtpaying_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except:
pass
def get_cashflow_data(year, quarter):
"""
        Get cash flow data
    Parameters
    --------
    year:int year, e.g. 2014
    quarter:int quarter: 1, 2, 3 or 4 (only these four quarters are accepted)
       Note: the data is scraped from the website page by page, so the speed depends on your current network connection
    Return
    --------
    DataFrame
        code, stock code
        name, company name
        cf_sales, net operating cash flow to sales revenue ratio
        rateofreturn, operating cash flow return on assets
        cf_nm, net operating cash flow to net profit ratio
        cf_liabilities, net operating cash flow to liabilities ratio
        cashflowratio, cash flow ratio
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
df = _get_cashflow_data(year, quarter, 1, pd.DataFrame())
if df is not None:
df = df.drop_duplicates('code')
return df
def _get_cashflow_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.CASHFLOW_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = ct.CASHFLOW_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_cashflow_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except:
pass
def _data_path():
import os
import inspect
caller_file = inspect.stack()[1][1]
pardir = os.path.abspath(os.path.join(os.path.dirname(caller_file), os.path.pardir))
return os.path.abspath(os.path.join(pardir, os.path.pardir))
| bsd-3-clause |
depet/scikit-learn | examples/plot_hmm_sampling.py | 8 | 2045 | """
==================================
Demonstration of sampling from HMM
==================================
This script shows how to sample points from a Hidden Markov Model (HMM):
we use a 4-component HMM with specified means and covariances.
The plot shows the sequence of observations generated, with the transitions
between them. We can see that, as specified by our transition matrix,
there are no transitions between components 1 and 3.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import hmm
##############################################################
# Prepare parameters for a 4-component HMM
# Initial population probability
start_prob = np.array([0.6, 0.3, 0.1, 0.0])
# The transition matrix; note that no transitions are possible
# between components 1 and 3
trans_mat = np.array([[0.7, 0.2, 0.0, 0.1],
[0.3, 0.5, 0.2, 0.0],
[0.0, 0.3, 0.5, 0.2],
[0.2, 0.0, 0.2, 0.6]])
# The means of each component
means = np.array([[0.0, 0.0],
[0.0, 11.0],
[9.0, 10.0],
[11.0, -1.0],
])
# The covariance of each component
covars = .5 * np.tile(np.identity(2), (4, 1, 1))
# Build an HMM instance and set parameters
model = hmm.GaussianHMM(4, "full", start_prob, trans_mat,
random_state=42)
# Instead of fitting it from the data, we directly set the estimated
# parameters, the means and covariance of the components
model.means_ = means
model.covars_ = covars
###############################################################
# Generate samples
X, Z = model.sample(500)
# Plot the sampled data
plt.plot(X[:, 0], X[:, 1], "-o", label="observations", ms=6,
mfc="orange", alpha=0.7)
# Indicate the component numbers
for i, m in enumerate(means):
plt.text(m[0], m[1], 'Component %i' % (i + 1),
size=17, horizontalalignment='center',
bbox=dict(alpha=.7, facecolor='w'))
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
johnbachman/ras_model | src/REM/RAF_module/BRAF_module.py | 6 | 3777 | """ Detailed mechanistic model of BRAF based on the Neal Rossen paper. """
from pysb import *
from pysb.util import alias_model_components
def monomers():
Monomer('BRAF', ['ras', 'd', 'vem', 'erk'])
Monomer('Vem', ['raf'])
# IC values
# --------
Parameter('BRAF_0', 1e5)
Parameter('Vem_0', 1000)
alias_model_components()
# Initial conditions
# ------------------
Initial(BRAF(d=None, ras=None, erk=None, vem=None), BRAF_0)
Initial(Vem(raf=None), Vem_0)
def BRAF_dynamics():
# Parameters
# -----------
Parameter('kaf', 1e-6)
Parameter('kar', 1)
Parameter('kbf', 0.5) # 1)
Parameter('kbr', 1e-11)
Parameter('kcf', 1)
Parameter('kcr', 0.0001)
Parameter('kdf', 1)
Parameter('kdr', 0.1)
Parameter('kef', 1e-2)
Parameter('ker', 0.1)
Parameter('kff', 1e-5)
Parameter('kfr', 1)
Parameter('kgf', 1e-11)
Parameter('kgr', 1)
Parameter('khf', 1e-2) # 100)
Parameter('khr', 1) # 1)
Parameter('koff', 1)
alias_model_components()
# Rules
# -----
# BRAF dimerization
Rule('BRAF_dimerization',
BRAF(d=None, ras=None) + BRAF(d=None, ras=None, vem=None) <>
BRAF(d=1, ras=None) % BRAF(d=1, ras=None, vem=None), kaf, kar)
# KRAS binding BRAF monomers
Rule('KRAS_binding_BRAF_monomers',
BRAF(ras=None, d=None) + KRAS(raf=None, state='gtp') <>
BRAF(ras=1, d=None) % KRAS(raf=1, state='gtp'), kdf, kdr)
# KRAS binding BRAF dimers
Rule('KRAS_binding_BRAF_dimers',
BRAF(ras=None, d=1) % BRAF(d=1) +
KRAS(raf=None, state='gtp') <>
BRAF(ras=2, d=1) % BRAF(d=1) %
KRAS(raf=2, state='gtp'), kbf, kbr)
# KRAS:BRAF dimerization
Rule('KRASBRAF_dimerization',
BRAF(d=None, ras=ANY) + BRAF(d=None, ras=ANY, vem=None) <>
BRAF(d=1, ras=ANY) % BRAF(d=1, ras=ANY, vem=None), kcf, kcr)
# BRAF:Vem dimerization to give 2(BRAF:Vem) g = a * f
Rule('BRAF_Vem_dimerization',
BRAF(d=None, ras=None, vem=ANY) + BRAF(d=None, ras=None, vem=ANY) <>
BRAF(d=1, ras=None, vem=ANY) % BRAF(d=1, ras=None, vem=ANY), kgf, kgr)
# KRAS:BRAF:Vem dimerization to give 2( KRAS:BRAF:Vem) h = c * a
Rule('KRAS_BRAF_Vem_dimerization',
BRAF(d=None, ras=ANY, vem=ANY) + BRAF(d=None, ras=ANY, vem=ANY) <>
BRAF(d=1, ras=ANY, vem=ANY) % BRAF(d=1, ras=ANY, vem=ANY), khf, khr)
# 1st Vemurafenib binds
Rule('First_binding_Vemurafenib',
BRAF(vem=None) % BRAF(vem=None) + Vem(raf=None) <>
BRAF(vem=1) % BRAF(vem=None) % Vem(raf=1), kef, ker)
# 2nd Vemurafenib binding
Rule('Second_binding_vemurafenib',
BRAF(vem=None) % BRAF(vem=ANY) + Vem(raf=None) <>
BRAF(vem=1) % BRAF(vem=ANY) % Vem(raf=1), kff, kfr)
# Vemurafenib binds BRAF monomer
Rule('Vemurafenib_binds_BRAF_monomer',
BRAF(vem=None, d=None) + Vem(raf=None) <>
BRAF(vem=1, d=None) % Vem(raf=1), kef, ker)
# Release KRAS:GDP from BRAF
Rule('KRAS_GDP_dissoc_BRAF',
KRAS(state='gdp', raf=1) % BRAF(ras=1) >>
KRAS(state='gdp', raf=None) + BRAF(ras=None), koff)
def observables():
# Observables
# ----------
Observable('BRAF_WT_active',
BRAF(d=ANY, vem=None))
Observable('BRAF_V600E_active',
BRAF(vem=None))
# if __name__ == '__main__':
# from pysb.integrate import Solver
# import matplotlib.pyplot as plt
# import numpy as np
# ts = np.linspace(0, 100, 100)
# solver = Solver(model, ts)
# solver.run()
# plt.figure()
# plt.plot(ts, solver.yobs['BRAF_WT_active'], label='WT')
# plt.plot(ts, solver.yobs['BRAF_V600E_active'], label='V600E')
# plt.legend()
# plt.show()
| mit |
pedrocastellucci/playground | vrp_scip_cg.py | 1 | 11908 | from pyscipopt import Model, quicksum, Pricer, SCIP_RESULT, SCIP_PARAMSETTING
# Using networkx for drawing the solution:
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
class DataVRP:
cap = None # Capacity of the truck
nodes = {} # Position of the nodes
depots = None # We are assuming one depot
demands = {} # Demand for each node. Demand of the depot is zero
costs = {} # Costs from going from i to j
def __init__(self, filename):
with open(filename) as fd:
lines = fd.readlines()
i = 0
size = None # Number of nodes
while i < len(lines):
line = lines[i]
if line.find("DIMENSION") > -1:
size = self.sepIntValue(line)
elif line.find("CAPACITY") > -1:
self.cap = self.sepIntValue(line)
elif line.find("NODE_COORD_SECTION") > -1:
i += 1
line = lines[i]
for _ in range(size):
n, x, y = [int(val.strip()) for val in line.split()]
i += 1 # Last step starts the DEMAND_SECTION
self.nodes[n] = (x, y)
line = lines[i]
# Not elif because of NODE_COORD_SECTION process.
if line.find("DEMAND_SECTION") > -1:
i += 1
line = lines[i]
for _ in range(size):
n, dem = [int(val.strip()) for val in line.split()]
i += 1
self.demands[n] = dem
line = lines[i]
if line.find("DEPOT_SECTION") > -1:
i += 1
depots = []
depot = int(lines[i].strip())
while depot >= 0:
depots.append(depot)
i += 1
depot = int(lines[i].strip())
assert(len(depots) == 1)
self.depot = depots[0]
i += 1
self.computeCostMatrix()
def sepIntValue(self, line):
value = line.split(":")[-1].strip()
return int(value)
def computeCostMatrix(self):
for (p1, (x1, y1)) in self.nodes.items():
for (p2, (x2, y2)) in self.nodes.items():
self.costs[p1, p2] = ((x1 - x2)**2 + (y1 - y2)**2)**0.5
class VRPpricer(Pricer):
# Binary variables z_r indicating whether
# pattern r is used:
z = None
# Data object with input data for the problem:
data = None
# Patterns currently in the problem:
patterns = None
# Function used to compute whether a
# client is visited by a particular pattern.
isClientVisited = None
# Function used to compute the cost of a pattern:
patCost = None
# List of client nodes:
clientNodes = []
# Model that holds the sub-problem:
subMIP = None
# Maximum number of patterns to be created:
maxPatterns = np.Inf
def __init__(self, z, cons, data, patterns,
costs, isClientVisited, patCost, maxPatterns):
self.z, self.cons, self.data, self.patterns = z, cons, data, patterns
self.isClientVisited = isClientVisited
self.patCost = patCost
self.maxPatterns = maxPatterns
for i in data.nodes:
if i != data.depot:
self.clientNodes.append(i)
super()
def pricerredcost(self):
'''
This is a method from the Pricer class.
It is used for adding a new column to the problem.
'''
# Maximum number of patterns reached:
print("Generated %d patterns" % len(self.patterns))
if len(self.patterns) >= self.maxPatterns:
print("Max patterns reached!")
return {'result': SCIP_RESULT.SUCCESS}
colRedCos, pattern = self.getColumnFromMIP(30) # 30 seconds of time limit
if colRedCos < -0.00001:
newPattern = pattern
obj = self.patCost(newPattern)
curVar = len(self.z)
newVar = self.model.addVar("New_" + str(curVar), vtype="C",
lb=0.0, ub=1.0, obj=obj,
pricedVar=True)
for cs in self.cons:
# Get client from constraint name:
client = int(cs.name.split("_")[-1].strip())
coeff = self.isClientVisited(client, newPattern)
self.model.addConsCoeff(cs, newVar, coeff)
self.patterns.append(newPattern)
self.z[curVar] = newVar
return {'result': SCIP_RESULT.SUCCESS}
def pricerinit(self):
'''
A method of the Pricer class. It is used to convert
the problem into its original form.
'''
for i, c in enumerate(self.cons):
self.cons[i] = self.model.getTransformedCons(c)
def getColumnFromMIP(self, timeLimit):
def getPatternFromSolution(subMIP):
edges = []
for x in subMIP.getVars():
if "x" in x.name:
if subMIP.getVal(x) > 0.99:
i, j = x.name.split("_")[1:]
edges.append((int(i), int(j)))
return edges
# Storing the values of the dual solutions:
dualSols = {}
for c in self.cons:
i = int(c.name.split("_")[-1].strip())
dualSols[i] = self.model.getDualsolLinear(c)
# Model for the sub-problem:
subMIP = Model("VRP-Sub")
subMIP.setPresolve(SCIP_PARAMSETTING.OFF)
        subMIP.setMinimize()
subMIP.setRealParam("limits/time", timeLimit)
# Binary variables x_ij indicating whether the vehicle
# traverses edge (i, j)
x = {}
for i in self.data.nodes:
for j in self.data.nodes:
if i != j:
x[i, j] = subMIP.addVar(vtype="B", obj=self.data.costs[i, j] - (dualSols[i] if i in self.clientNodes else 0), name="x_%d_%d" % (i, j))
# Non negative variables u_i indicating the demand served up to node i:
u = {}
for i in self.data.nodes:
u[i] = subMIP.addVar(vtype="C", lb=0, ub=self.data.cap, obj=0.0, name="u_%d" % i)
for j in self.clientNodes:
subMIP.addCons(quicksum(x[i, j] for i in self.data.nodes if i != j) <= 1)
for h in self.clientNodes:
subMIP.addCons(quicksum(x[i, h] for i in self.data.nodes if i != h) ==
quicksum(x[h, i] for i in self.data.nodes if i != h))
for i in self.data.nodes:
for j in self.clientNodes:
if i != j:
subMIP.addCons(u[j] >= u[i] + self.data.demands[j]*x[i, j] - self.data.cap*(1 - x[i, j]))
subMIP.addCons(quicksum(x[self.data.depot, j] for j in self.clientNodes) <= 1)
subMIP.hideOutput()
subMIP.optimize()
mipSol = subMIP.getBestSol()
obj = subMIP.getSolObjVal(mipSol)
pattern = getPatternFromSolution(subMIP)
return obj, pattern
class VRPsolver:
data = None
clientNodes = []
# A pattern is a feasible route for visiting
# some clients:
patterns = None
# The master model:
master = None
# The pricer object:
pricer = None
# Max patterns for column generation:
maxPatterns = np.Inf
# If we are solving the linear column generation this
# must be False.
# If it is True, we solve the problem of selecting
# the best patterns to use -- without column generation.
integer = False
def __init__(self, vrpData, maxPatterns):
self.data = vrpData
self.clientNodes = [n for n in self.data.nodes.keys() if n != self.data.depot]
self.maxPatterns = maxPatterns
def genInitialPatterns(self):
'''
Generating initial patterns.
'''
patterns = []
for n in self.clientNodes:
patterns.append([(self.data.depot, n), (n, self.data.depot)])
self.patterns = patterns
def setInitialPatterns(self, patterns):
self.patterns = patterns
def addPatterns(self, patterns):
for pat in patterns:
self.patterns.append(pat)
def patCost(self, pat):
cost = 0.0
for (i, j) in pat:
cost += self.data.costs[i, j]
return cost
def isClientVisited(self, c, pat):
        # Check if client c is visited in pattern pat:
for (i, j) in pat:
if i == c or j == c:
return 1
return 0
def solve(self, integer=False):
'''
By default we solve a linear version of the column generation.
        If integer is True then we solve the problem of finding the best
routes to be used without column generation.
'''
self.integer = integer
if self.patterns is None:
self.genInitialPatterns()
# Creating master Model:
master = Model("Master problem")
# Creating pricer:
if not integer:
master.setPresolve(SCIP_PARAMSETTING.OFF)
# Populating master model.
# Binary variables z_r indicating whether
# pattern r is used in the solution:
z = {}
for i, _ in enumerate(self.patterns):
z[i] = master.addVar(vtype="B" if integer else "C",
lb=0.0, ub=1.0, name="z_%d" % i)
# Set objective:
master.setObjective(quicksum(self.patCost(p)*z[i] for i, p in enumerate(self.patterns)),
"minimize")
clientCons = [None]*len(self.clientNodes)
for i, c in enumerate(self.clientNodes):
cons = master.addCons(
quicksum(self.isClientVisited(c, p)*z[i] for i, p in enumerate(self.patterns)) == 1,
"Consumer_%d" % c,
separate=False, modifiable=True)
clientCons[i] = cons
if not integer:
pricer = VRPpricer(z, clientCons, self.data, self.patterns,
self.data.costs, self.isClientVisited,
self.patCost, self.maxPatterns)
master.includePricer(pricer, "VRP pricer", "Identifying new routes")
self.pricer = pricer
if integer:
print("Finding the best patterns among:")
for p in self.patterns:
print(p)
self.master = master # Save master model.
master.optimize()
def printSolution(self):
if self.integer:
zVars = self.master.getVars()
else:
zVars = self.pricer.z
print(zVars)
usedPatterns = []
for i, z in enumerate(zVars):
if self.master.getVal(zVars[i]) > 0.01:
print(self.patterns[i], self.master.getVal(zVars[i]))
usedPatterns.append(self.patterns[i])
return usedPatterns
def drawSolution(self):
patterns = self.printSolution()
graph = nx.DiGraph()
for pat in patterns:
graph.add_edges_from(pat)
nx.draw_networkx_nodes(graph, self.data.nodes)
nx.draw_networkx_edges(graph, self.data.nodes)
nx.draw_networkx_labels(graph, self.data.nodes)
plt.show()
if __name__ == "__main__":
data = DataVRP("./data/A-VRP/A-n32-k5.vrp")
# data = DataVRP("toy15.vrp")
solver = VRPsolver(data, 180)
solver.solve()
usedPatterns = solver.printSolution()
solver.drawSolution()
solver.setInitialPatterns(solver.patterns)
# solver.addPatterns(solver.patterns)
solver.solve(integer=True)
solver.drawSolution()
| gpl-3.0 |
abhitopia/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 22 | 57502 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
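      # The strided_slice below keeps predictions[:, 1:2] (the predicted
      # probability of the positive class), so the returned metric is the sum
      # of those probabilities over the positive examples.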
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
gpersistence/tstop | scripts/rips_plotter.py | 1 | 11357 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
import sys
import csv
import os
import colorsys
import random
import multiprocessing
import argparse
import importlib
from copy import copy
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import wx
from persistence.Datatypes.PersistenceDiagrams import PersistenceDiagram as PD
from persistence.Datatypes.Distances import Distance
from persistence.PersistenceGenerator import process
from persistence.Datatypes.Segments import Segment, Segments, max_label
from persistence.UCRSegments import UCRSegments
from persistence.Datatypes.JSONObject import load_data, save_data
from persistence.Datatypes.Configuration import Configuration, parse_index
def format_runtime(runtime) :
hrs = math.floor(runtime / 3600)
mns = math.floor(runtime / 60) - hrs * 60
sec = runtime - hrs * 3600 - mns * 60
return "%02d:%02d:%02.2f" % (int(hrs), int(mns), sec)
class CanvasFrame(wx.Frame) :
def __init__(self, full_data) :
self.full_rips_persistences = [PD.fromJSONDict(entry[0]['full_diagram']) for entry in full_data]
self.full_rips_runtimes = [format_runtime(entry[0]['runtime']) for entry in full_data]
self.sparse_rips_persistences = [[PD.fromJSONDict(e['diagram']) for e in entry[1:]] for entry in full_data]
self.sparse_rips_distances = [[(e['bottleneck_distance'],e['wasserstein_l1'],e['wasserstein_l2']) for e in entry[1:]] \
for entry in full_data]
self.sparse_rips_sparsity = [[[float(s)*100.0 for s in e['sparsity']] if 'sparsity' in e else None for e in entry[1:]] for entry in full_data]
self.sparse_rips_runtimes = [[format_runtime(e['runtime']) for e in entry[1:]] for entry in full_data]
self.simplices = [int(entry['max_simplices']) if 'max_simplices' in entry else None for entry in full_data[0][1:]]
self.epsilons = [float(entry['epsilon']) if 'epsilon' in entry else None for entry in full_data[0][1:]]
wx.Frame.__init__(self, None, -1, "Full vs. Sparse Rips Filtration", size=(550, 550))
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_KEY_UP, self.KeyEvent)
self.segment_index = 0
self.full_rips_figure = Figure()
self.full_rips_axes = self.full_rips_figure.add_subplot(111)
self.full_rips_canvas = FigureCanvas(self, -1, self.full_rips_figure)
self.full_rips_title = self.full_rips_figure.suptitle("Persistence Diagram %s of %s runtime %s" % \
(self.segment_index + 1, len(self.full_rips_persistences), \
self.full_rips_runtimes[self.segment_index]))
self.simplices_index = 0
self.sparse_rips_figure = Figure()
self.sparse_rips_axes = self.sparse_rips_figure.add_subplot(111)
self.sparse_rips_canvas = FigureCanvas(self, -1, self.sparse_rips_figure)
self.sparse_rips_title = self.sparse_rips_figure.suptitle("max simplices %s" % \
(self.simplices[self.simplices_index],))
self.sizer = wx.GridBagSizer(hgap=5, vgap=5)
self.sizer.Add(NavigationToolbar2Wx(self.full_rips_canvas), pos=(0,0), span=(1,2), flag=wx.EXPAND)
self.sizer.AddGrowableCol(1,0)
self.sizer.Add(self.full_rips_canvas, pos=(1,0), span=(8,2), flag=wx.EXPAND)
self.sizer.AddGrowableCol(9,0)
self.sizer.Add(NavigationToolbar2Wx(self.sparse_rips_canvas), pos=(9,0), span=(1,2), flag=wx.EXPAND)
self.sizer.Add(self.sparse_rips_canvas, pos=(10,0), span=(8,2), flag=wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.background = self.full_rips_axes.figure.canvas.copy_from_bbox(self.full_rips_axes.bbox)
self.refresh()
def refresh(self) :
# Max of all the values so the different plots have the same scale
max_val = max([max([d[0] for d in self.full_rips_persistences[self.segment_index].points]),
max([d[1] for d in self.full_rips_persistences[self.segment_index].points]),
max([d[0] for d in self.sparse_rips_persistences[self.segment_index][self.simplices_index].points]),
max([d[1] for d in self.sparse_rips_persistences[self.segment_index][self.simplices_index].points])])
self.full_rips_canvas.restore_region(self.background)
self.full_rips_title.set_text("Persistence Diagram %s of %s runtime %s" % \
(self.segment_index + 1, len(self.full_rips_persistences), \
self.full_rips_runtimes[self.segment_index]))
self.full_rips_axes.cla()
data = self.full_rips_persistences[self.segment_index].points
if data != None and len(data) > 0 :
xs = [d[0] for d in data if d[2] == 1]
ys = [d[1] for d in data if d[2] == 1]
self.full_rips_axes.scatter(xs,ys, color="blue")
xs = [d[0] for d in data if d[2] == 0]
ys = [d[1] for d in data if d[2] == 0]
self.full_rips_axes.scatter(xs,ys, color="grey")
self.full_rips_axes.plot([0,max_val],[0,max_val],color="red")
self.sparse_rips_canvas.restore_region(self.background)
if self.simplices[self.simplices_index] != None :
self.sparse_rips_title.set_text("max simplices %s runtime %s sparsity 0 %02.2f%% 1 %02.2f%% 2 %02.2f%%" % \
(self.simplices[self.simplices_index],
self.sparse_rips_runtimes[self.segment_index][self.simplices_index],
self.sparse_rips_sparsity[self.segment_index][self.simplices_index][0] if self.sparse_rips_sparsity[self.segment_index][self.simplices_index] != None else 0.0,
self.sparse_rips_sparsity[self.segment_index][self.simplices_index][1] if self.sparse_rips_sparsity[self.segment_index][self.simplices_index] != None else 0.0,
self.sparse_rips_sparsity[self.segment_index][self.simplices_index][2] if self.sparse_rips_sparsity[self.segment_index][self.simplices_index] != None else 0.0))
else :
self.sparse_rips_title.set_text("epsilon %g runtime %s sparsity 0 %02.2f%% 1 %02.2f%% 2 %02.2f%%" % \
(self.epsilons[self.simplices_index],
self.sparse_rips_runtimes[self.segment_index][self.simplices_index],
self.sparse_rips_sparsity[self.segment_index][self.simplices_index][0] if self.sparse_rips_sparsity[self.segment_index][self.simplices_index] != None else 0.0,
self.sparse_rips_sparsity[self.segment_index][self.simplices_index][1] if self.sparse_rips_sparsity[self.segment_index][self.simplices_index] != None else 0.0,
self.sparse_rips_sparsity[self.segment_index][self.simplices_index][2] if self.sparse_rips_sparsity[self.segment_index][self.simplices_index] != None else 0.0))
self.sparse_rips_axes.cla()
self.sparse_rips_axes.set_title("distance bottleneck %.3f wasserstein l1 %.3f l2 %.3f" % \
(self.sparse_rips_distances[self.segment_index][self.simplices_index][0],
self.sparse_rips_distances[self.segment_index][self.simplices_index][1],
self.sparse_rips_distances[self.segment_index][self.simplices_index][2]),
fontdict=dict([('fontsize',12)]))
data = self.sparse_rips_persistences[self.segment_index][self.simplices_index].points
if data != None and len(data) > 0 :
xs = [d[0] for d in data if d[2] == 1]
ys = [d[1] for d in data if d[2] == 1]
self.sparse_rips_axes.scatter(xs,ys, color="blue")
xs = [d[0] for d in data if d[2] == 0]
ys = [d[1] for d in data if d[2] == 0]
self.sparse_rips_axes.scatter(xs,ys, color="grey")
self.sparse_rips_axes.plot([0,max_val],[0,max_val],color="red")
def KeyEvent(self, event):
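        # Arrow-key navigation: left/right cycles through the sparse-rips
        # settings (max simplices / epsilon), up/down cycles through the
        # persistence diagrams of the individual segments.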
keycode = event.GetKeyCode()
if keycode == wx.WXK_LEFT :
self.simplices_index = (self.simplices_index - 1) % len(self.simplices)
self.refresh()
wx.PostEvent(self,wx.PaintEvent())
elif keycode == wx.WXK_RIGHT :
self.simplices_index = (self.simplices_index + 1) % len(self.simplices)
self.refresh()
wx.PostEvent(self,wx.PaintEvent())
elif keycode == wx.WXK_UP :
self.segment_index = (self.segment_index - 1) % len(self.full_rips_persistences)
self.refresh()
wx.PostEvent(self,wx.PaintEvent())
elif keycode == wx.WXK_DOWN :
self.segment_index = (self.segment_index + 1) % len(self.full_rips_persistences)
self.refresh()
wx.PostEvent(self,wx.PaintEvent())
else :
pass
event.Skip()
def OnPaint(self, event):
paint_dc = wx.PaintDC(self)
self.full_rips_canvas.draw()
self.sparse_rips_canvas.draw()
class App(wx.App):
def __init__(self, arg, full_data):
self.full_data = full_data
wx.App.__init__(self,0)
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame(self.full_data)
frame.Show(True)
self.SetTopWindow(frame)
return True
import glob
def main(argv) :
parser = argparse.ArgumentParser(description="utility to plot \
persistence diagrams for examining full vs sparse rips filtration")
parser.add_argument('-p', '--prefix', help="data file prefix (e.g. foo.json to plot foo.json.0000 - foo.json.9999)")
args = parser.parse_args(argv[1:])
files = glob.glob(args.prefix + ".[0-9][0-9][0-9][0-9]")
files.sort()
full_data = [load_data(fn, None, None, None, argv[0] + " : ") for fn in files]
try:
app = App(0, full_data)
app.MainLoop()
except KeyboardInterrupt:
sys.exit(0)
if __name__ == "__main__" :
main(sys.argv)
| gpl-3.0 |
sfalkner/pySMAC | pysmac/analyzer.py | 2 | 7707 | from __future__ import print_function, division, absolute_import
import os
import glob
import six
import re
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError('pySMAC was not installed with analyzer support.')
import sys
sys.path.append('/home/sfalkner/repositories/github/pysmac/')
import pysmac.remote_smac
import pysmac.utils.smac_output_readers as smac_readers
class SMAC_analyzer(object):
# collects smac specific data that goes into the scenario file
def __init__(self, obj):
        if isinstance(obj,pysmac.remote_smac.remote_smac):
            self.scenario_fn = os.path.join(obj.working_directory, 'scenario.dat')
        else:
            self.scenario_fn = str(obj)
# if it is a string, it can be a directory or a file
if isinstance(obj, six.string_types):
if os.path.isfile(obj):
self.scenario_fn=obj
else:
self.scenario_fn=os.path.join(obj, 'scenario.dat')
self.validation = False
self.overall_objective = "MEAN"
# parse scenario file for important information
with open(self.scenario_fn,'r') as fh:
for line in fh.readlines():
strlist = line.split()
if strlist[0] in {'output-dir', 'outputDirectory', 'outdir'}:
self.output_dir = strlist[1]
if strlist[0] in {'pcs-file'}:
self.pcs_fn = strlist[1]
if strlist[0] == 'validation':
self.validation = bool(strlist[1])
if strlist[0] in {'intra-obj', 'intra-instance-obj', 'overall-obj', 'intraInstanceObj', 'overallObj', 'overall_obj','intra_instance_obj'}:
self.overall_objective = strlist[1]
if strlist[0] in {'algo-cutoff-time','target-run-cputime-limit', 'target_run_cputime_limit', 'cutoff-time', 'cutoffTime', 'cutoff_time'}:
self.cutoff_time = float(strlist[1])
# find the number of runs
        self.scenario_output_dir = (os.path.join(self.output_dir,
            os.path.basename(os.path.splitext(self.scenario_fn)[0])))
tmp = glob.glob( os.path.join(self.scenario_output_dir, "traj-run-*.txt"))
# create the data dict for every run index
self.data = {}
for fullname in tmp:
filename = (os.path.basename(fullname))
run_id = re.match("traj-run-(\d*).txt",filename).group(1)
self.data[int(run_id)]={}
# for now, we only load the incumbents for each run
for i in list(self.data.keys()):
try:
# with test instances, the validation runs are loaded
if self.validation:
configs = smac_readers.read_validationCallStrings_file(
os.path.join(self.scenario_output_dir,
"validationCallStrings-traj-run-{}-walltime.csv".format(i)))
test_performances = smac_readers.read_validationObjectiveMatrix_file(
os.path.join(self.scenario_output_dir,
"validationObjectiveMatrix-traj-run-{}-walltime.csv".format(i)))
                # without validation, there are only trajectory files to parse
else:
raise NotImplemented("The handling of cases without validation runs is not yet implemented")
self.data[i]['parameters'] = configs
self.data[i]['test_performances'] = test_performances
except:
print("Failed to load data for run {}. Please make sure it has finished properly.\nDropping it for now.".format(i))
self.data.pop(i)
def get_pyfanova_obj(self, improvement_over='DEFAULT', check_scenario_files = True, heap_size=8192):
try:
import pyfanova.fanova
self.merged_dir = os.path.join(self.output_dir,"merged_run")
# delete existing merged run folder
if os.path.exists(self.merged_dir):
import shutil
shutil.rmtree(self.merged_dir)
from pysmac.utils.state_merge import state_merge
print(os.path.join(self.scenario_output_dir, 'state-run*'))
print(glob.glob(os.path.join(self.scenario_output_dir, 'state-run*')))
state_merge(glob.glob(os.path.join(self.scenario_output_dir, 'state-run*')),
self.merged_dir, check_scenario_files = check_scenario_files)
return(pyfanova.fanova.Fanova(self.merged_dir, improvement_over=improvement_over,heap_size=heap_size))
except ImportError:
            raise NotImplementedError('To use this feature, please install the pyfanova package.')
except:
            print('Something went wrong during the initialization of the FANOVA.')
raise
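    # Illustrative usage sketch (hypothetical path; assumes the optional
    # pyfanova package is installed):
    #   analyzer = SMAC_analyzer('smac_output/scenario.dat')
    #   fanova = analyzer.get_pyfanova_obj()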
def get_item_all_runs(self, func = lambda d: d['function value']):
return ([list(map(func, run[1:])) for run in self.data_all_runs])
def get_item_single_run(self, run_id, func = lambda d: d['function value']):
return list(map(func,self.data_all_runs[run_id][1:]))
def plot_run_performance(self, runs = None):
plot = interactive_plot()
for i in range(len(self.data_all_runs)):
y = self.get_item_single_run(i)
x = list(range(len(y)))
plot.scatter(self.data_all_runs[i][0], x,y, self.get_item_single_run(i, func = lambda d: '\n'.join(['%s=%s'%(k,v) for (k,v) in list(d['parameter settings'].items()) ]) ), color = self.cm[i])
plot.add_datacursor()
plot.show()
def plot_run_incumbent(self, runs = None):
plot = interactive_plot()
for i in range(len(self.data_all_runs)):
y = np.minimum.accumulate(self.get_item_single_run(i))
            _ , indices = np.unique(y, return_index = True)
            indices = np.append(indices[::-1], len(y)-1)
            x = np.arange(len(y))[indices]
            y = y[indices]
plot.step(self.data_all_runs[i][0], x, y, color = self.cm[i])
plot.add_datacursor(formatter = 'iteration {x:.0f}: {y}'.format)
plot.show()
def basic_analysis (self):
fig, ax = plt.subplots()
ax.set_title('function value vs. number of iterations')
ax.set_xlabel('iteration')
ax.set_ylabel('function value')
for i in range(len(self.trajectory)):
color='red' if i == self.incumbent_index else 'blue'
ax.scatter( i, self.trajectory[i][0], color=color, label = '\n'.join(['%s = %s' % (k,v) for (k, v) in list(self.trajectory[i][2].items())]))
datacursor(
bbox=dict(alpha=1),
formatter = 'iteration {x:.0f}: {y}\n{label}'.format,
hover=False,
display='multiple',
draggable=True,
horizontalalignment='center',
hide_button = 3)
fig, ax = plt.subplots()
incumbents = np.minimum.accumulate(list(map(itemgetter(0), self.trajectory)))
ax.step(list(range(len(incumbents))), incumbents)
plt.show()
| agpl-3.0 |
schoolie/bokeh | bokeh/models/tests/test_sources.py | 1 | 9664 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource
from bokeh.util.serialization import transform_column_source_data
class TestColumnDataSource(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertIsInstance(ds.data[key], pd.Series)
self.assertEquals(list(df[key]), list(ds.data[key]))
self.assertIsInstance(ds.data['index'], np.ndarray)
self.assertEquals([0, 1], list(ds.data['index']))
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertIsInstance(ds.data[key], pd.Series)
self.assertEquals(list(df[key]), list(ds.data[key]))
self.assertIsInstance(ds.data['index'], np.ndarray)
self.assertEquals([0, 1], list(ds.data['index']))
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
def test_stream_bad_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
with self.assertRaises(ValueError) as cm:
ds.stream(dict())
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: a, b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b, extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10, 20]))
self.assertEqual(str(cm.exception), "All streaming column updates must be the same length")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=np.ones((1,1))))
self.assertTrue(
str(cm.exception).startswith("stream(...) only supports 1d sequences, got ndarray with size (")
)
def test_stream_good_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
ds.stream(dict(a=[11, 12], b=[21, 22]), "foo", mock_setter)
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo", mock_setter))
self.assertEqual(stuff['kw'], {})
def test_patch_bad_data(self):
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(3, 100)]))
self.assertEqual(str(cm.exception), "Out-of bounds index (3) in patch for column: a")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(c=[(0, 100)]))
self.assertEqual(str(cm.exception), "Can only patch existing columns (extra: c)")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(0,100)], c=[(0, 100)], d=[(0, 100)]))
self.assertEqual(str(cm.exception), "Can only patch existing columns (extra: c, d)")
def test_patch_good_data(self):
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._patch = mock
ds.patch(dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter)
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter))
self.assertEqual(stuff['kw'], {})
def test_data_column_lengths(self):
# TODO: use this when soft=False
#
#with self.assertRaises(ValueError):
# ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
#
#ds = ColumnDataSource()
#with self.assertRaises(ValueError):
# ds.data = dict(a=[10, 11], b=[20, 21, 22])
#
#ds = ColumnDataSource(data=dict(a=[10, 11]))
#with self.assertRaises(ValueError):
# ds.data["b"] = [20, 21, 22]
#
#ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
#with self.assertRaises(ValueError):
# ds.data.update(dict(a=[10, 11, 12]))
with warnings.catch_warnings(record=True) as warns:
ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)")
ds = ColumnDataSource()
with warnings.catch_warnings(record=True) as warns:
ds.data = dict(a=[10, 11], b=[20, 21, 22])
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)")
ds = ColumnDataSource(data=dict(a=[10, 11]))
with warnings.catch_warnings(record=True) as warns:
ds.data["b"] = [20, 21, 22]
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)")
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
with warnings.catch_warnings(record=True) as warns:
ds.data.update(dict(a=[10, 11, 12]))
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 3), ('b', 2)")
def test_set_data_from_json_list(self):
ds = ColumnDataSource()
data = {"foo": [1, 2, 3]}
ds.set_from_json('data', data)
self.assertEquals(ds.data, data)
def test_set_data_from_json_base64(self):
ds = ColumnDataSource()
data = {"foo": np.arange(3)}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
def test_set_data_from_json_nested_base64(self):
ds = ColumnDataSource()
data = {"foo": [[np.arange(3)]]}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
def test_set_data_from_json_nested_base64_and_list(self):
ds = ColumnDataSource()
data = {"foo": [np.arange(3), [1, 2, 3]]}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
ggould256/libpmp | historical/burndown.py | 1 | 3073 | # Copyright 2016 Toyota Research Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to create a burndown chart from a task history."""
from io import BytesIO
from matplotlib import pyplot as plt
from numpy import linspace
from scipy.interpolate import interp1d
from common import text
from distributions import distribution
from distributions.operations import dist_add
def create_burndown_html(history, node):
"""Create a burndown plot for the given @p node.
@return that plot as an html `div` element."""
return ('<div>' +
create_burndown_svg(history, node) + "</div>")
def create_burndown_svg(history, node):
"""Create a burndown plot for the given @p node.
@return that plot as a utf-8 string of svg data."""
plt.clf()
_plot_burndown(history, node)
figure_bytes = BytesIO()
plt.savefig(figure_bytes, format="svg")
return figure_bytes.getvalue().decode("utf-8")
def _get_historical_costs(history, node):
"""Return a list [(date, distribution)] for a node.
Totals the projected cost of every predecessor of @p node at every date
in @p history.
"""
node_history = history.get_linear_history(node)
cost_history = []
for (date, nodes) in node_history:
cost_for_date = distribution.ZERO
for node in nodes:
cost_for_date = dist_add(cost_for_date, node.final_cost())
cost_history += [(date, cost_for_date)]
return cost_history
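# Illustrative shape of the list returned by _get_historical_costs (dates and
# distributions here are hypothetical, not taken from any real history):
#   [(datetime.date(2016, 1, 4), <distribution of total remaining cost>),
#    (datetime.date(2016, 1, 11), <distribution>), ...]
# i.e. one (date, summed-cost distribution) pair per entry in the linear history.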
def _burndown_axes(history, node):
"""Configure the axes of the current plot and @return them."""
axes = plt.axes()
axes.set_xlabel("Date")
axes.set_ylabel("Remaining Cost")
axes.set_title(node.get_display_name())
axes.set_xlim(left=history.oldest_date(), right=history.most_recent_date())
axes.set_autoscaley_on(True)
return axes
def _plot_burndown(history, node):
"""Write a burndown plot to the current pyplot figure."""
plt.xkcd() # Represent approximate/unfounded estimates with xkcd art.
axes = _burndown_axes(history, node)
costs = _get_historical_costs(history, node)
dates = [date for (date, _) in costs]
def quantile_history(q):
return [cost.quantile(q) for (date, cost) in costs]
axes.fill_between(dates, quantile_history(0.1), quantile_history(0.9),
hatch="/", edgecolor="red")
axes.fill_between(dates, quantile_history(0.25), quantile_history(0.75),
hatch="\\", edgecolor="red")
axes.plot(dates, quantile_history(0.5), color="black")
axes.set_ylim(bottom=0.)
| apache-2.0 |
PatrickChrist/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | 3 | 7028 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
import sys
from .mpl_axes import Axes
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
"""
pad : fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = locatable_axes_factory(ax._axes_class)
except AttributeError:
axes_class = locatable_axes_factory(type(ax))
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
def imshow_rgb(ax, r, g, b, **kwargs):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = ax.imshow(RGB, **kwargs)
return im_rgb
class RGBAxesBase(object):
"""base class for a 4-panel imshow (RGB, R, G, B)
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Attributes
----------
_defaultAxesClass : matplotlib.axes.Axes
defaults to 'Axes' in RGBAxes child class.
No default in abstract base class
RGB : _defaultAxesClass
The axes object for the three-channel imshow
R : _defaultAxesClass
The axes object for the red channel imshow
G : _defaultAxesClass
The axes object for the green channel imshow
B : _defaultAxesClass
The axes object for the blue channel imshow
"""
def __init__(self, *kl, **kwargs):
"""
Parameters
----------
pad : float
fraction of the axes height to put as padding.
defaults to 0.0
add_all : bool
True: Add the {rgb, r, g, b} axes to the figure
defaults to True.
axes_class : matplotlib.axes.Axes
kl :
Unpacked into axes_class() init for RGB
kwargs :
Unpacked into axes_class() init for RGB, R, G, B axes
"""
pad = kwargs.pop("pad", 0.0)
add_all = kwargs.pop("add_all", True)
try:
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
except AttributeError:
new_msg = ("A subclass of RGBAxesBase must have a "
"_defaultAxesClass attribute. If you are not sure which "
"axes class to use, consider using "
"mpl_toolkits.axes_grid1.mpl_axes.Axes.")
six.reraise(AttributeError, AttributeError(new_msg),
sys.exc_info()[2])
ax = axes_class(*kl, **kwargs)
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
ax1.axis[:].toggle(ticklabels=False)
ax_rgb.append(ax1)
self.RGB = ax
self.R, self.G, self.B = ax_rgb
if add_all:
fig = ax.get_figure()
fig.add_axes(ax)
self.add_RGB_to_figure()
self._config_axes()
def _config_axes(self, line_color='w', marker_edge_color='w'):
"""Set the line color and ticks for the axes
Parameters
----------
line_color : any matplotlib color
marker_edge_color : any matplotlib color
"""
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color(line_color)
ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)
def add_RGB_to_figure(self):
"""Add the red, green and blue axes to the RGB composite's axes figure
"""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""Create the four images {rgb, r, g, b}
Parameters
----------
r : array-like
The red array
g : array-like
The green array
b : array-like
The blue array
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
ny, nx = r.shape
        if not ((ny, nx) == g.shape == b.shape):
raise ValueError('Input shapes do not match.'
'\nr.shape = {}'
'\ng.shape = {}'
'\nb.shape = {}'
''.format(r.shape, g.shape, b.shape))
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
_defaultAxesClass = Axes
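# A minimal usage sketch of RGBAxes (kept as a comment because it is not part
# of the module; it assumes pyplot is imported as plt and that r, g, b are
# three equally-shaped 2-D arrays):
#
#     fig = plt.figure()
#     ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.0)
#     im_rgb, im_r, im_g, im_b = ax.imshow_rgb(r, g, b, origin="lower")
#
# The composite image fills the large panel and the individual channels fill
# the three small panels described in the RGBAxesBase docstring.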
| apache-2.0 |
darcamo/pyphysim | pyphysim/modulators/fundamental.py | 1 | 26699 | #!/usr/bin/env python
# http://www.doughellmann.com/PyMOTW/struct/
# import struct
# import binascii
"""
Module with class for some fundamental modulators, such as PSK and M-QAM.
All fundamental modulators inherit from the `Modulator` class and should
call the self.setConstellation method in their __init__ method, as well
as implement the calcTheoreticalSER and calcTheoreticalBER methods. """
try:
# noinspection PyUnresolvedReferences
import matplotlib.pyplot as plt
_MATPLOTLIB_AVAILABLE = True
except ImportError: # pragma: no cover
_MATPLOTLIB_AVAILABLE = False
import math
from typing import Optional, TypeVar, Union
import numpy as np
from pyphysim.util.conversion import binary2gray, dB2Linear, gray2binary
from pyphysim.util.misc import level2bits, qfunc
PI = np.pi
NumberOrArray = TypeVar("NumberOrArray", np.ndarray, float)
__all__ = ['Modulator', 'PSK', 'QPSK', 'BPSK', 'QAM']
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx Modulator Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class Modulator:
"""
Base class for digital modulators.
The derived classes need to at least call setConstellation to set the
constellation in their constructors as well as implement
calcTheoreticalSER and calcTheoreticalBER.
Examples
--------
>>> np.set_printoptions(linewidth=70)
>>> constellation = np.array([1 + 1j, - 1 + 1j, - 1 - 1j, 1 - 1j])
>>> m=Modulator()
>>> m.setConstellation(constellation)
>>> m.symbols
array([ 1.+1.j, -1.+1.j, -1.-1.j, 1.-1.j])
>>> m.M
4
>>> m.K
2.0
>>> m
4-Modulator object
>>> m.modulate(np.array([0, 0, 3, 3, 1, 3, 3, 3, 2, 2]))
array([ 1.+1.j, 1.+1.j, 1.-1.j, 1.-1.j, -1.+1.j, 1.-1.j, 1.-1.j,
1.-1.j, -1.-1.j, -1.-1.j])
>>> m.demodulate(np.array([ 1. + 1.j, 1. + 1.j, 1. - 1.j, 1. - 1.j, \
- 1. + 1.j, 1. - 1.j, 1. - 1.j, 1. - 1.j, \
- 1. - 1.j, - 1. - 1.j]))
array([0, 0, 3, 3, 1, 3, 3, 3, 2, 2])
"""
def __init__(self) -> None:
"""Initializes the Modulator object.
"""
# This should be set in a subclass of the Modulator Class by
# calling the setConstellation method..
self._M: int = 0 # Constellation size (modulation cardinality)
# Number of bits represented by each symbol in the constellation
self._K: int = 0
self.symbols: np.ndarray = np.array([])
@property
def name(self) -> str:
"""
Get method for the 'name' property.
Returns
-------
str
The name of the modulator.
"""
return "{0:d}-{1:s}".format(self._M, self.__class__.__name__)
@property
def M(self) -> int:
"""
Get method for the M property.
The `M` property corresponds to the number of symbols in the
constellation.
See also
--------
K
"""
return self._M
@property
def K(self) -> int:
"""
Get method for the K property.
The `K` property corresponds to the number of bits represented by
each symbol in the constellation. It is equal to log2(M), where `M`
is the constellation size.
See also
--------
M
"""
return self._K
def __repr__(self) -> str: # pragma: no cover
"""
Get the string representation of the object.
Returns
-------
str
String representation of the object.
"""
return "{0} object".format(self.name)
def setConstellation(self, symbols: np.ndarray) -> None:
"""
Set the constellation of the modulator.
This function should be called in the constructor of the derived
classes.
Parameters
----------
symbols : np.ndarray
            A numpy array with the symbol table.
"""
M = symbols.size
self._M = M
self._K = np.log2(M)
self.symbols = symbols
def plotConstellation(self) -> None: # pragma: no cover
"""Plot the constellation (in a scatter plot).
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # one row, one column, first plot
# circlePatch = patches.Circle((0,0),1)
# circlePatch.set_fill(False)
# ax.add_patch(circlePatch)
ax.scatter(self.symbols.real, self.symbols.imag)
ax.axis('equal')
ax.grid()
formatString = "{0:0=" + str(level2bits(self._M)) + "b} ({0})"
index = 0
for symbol in self.symbols:
ax.text(
symbol.real, # Coordinate X
symbol.imag + 0.03, # Coordinate Y
formatString.format(index, format_spec="0"), # Text
verticalalignment='bottom', # From now on, text properties
horizontalalignment='center')
index += 1
plt.show()
def modulate(self, inputData: Union[int, np.ndarray]) -> np.ndarray:
"""
Modulate the input data (decimal data).
Parameters
----------
inputData : np.ndarray | int
Data to be modulated.
Returns
-------
modulated_data : np.ndarray
The modulated data
Raises
------
ValueError
If inputData has any invalid value such as values greater
than self._M - 1. Note that inputData should not have
negative values but no check is done for this.
"""
try:
return self.symbols[inputData]
except IndexError:
            raise ValueError("Input data must be between 0 and M - 1")
def demodulate(self, receivedData: np.ndarray) -> np.ndarray:
"""
Demodulate the data.
Parameters
----------
receivedData : np.ndarray
Data to be demodulated.
Returns
-------
demodulated_data : np.ndarray
The demodulated data.
"""
# xxxxxxxxxx First Try xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# def getClosestSymbol(symb):
# closestSymbolIndex = np.abs(self.symbols - symb).argmin()
# return closestSymbolIndex
# getClosestSymbol = np.frompyfunc(getClosestSymbol, 1, 1)
# return getClosestSymbol(receivedData).astype(int)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# ### Second Try xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        # # This version is a little faster than the first version
# shape = receivedData.shape
# num_symbols = receivedData.size
# output = np.empty(num_symbols, dtype=int)
# reshaped_received_data = receivedData.flatten()
# for ii in range(num_symbols):
# output[ii] = np.abs(
# self.symbols
# - reshaped_received_data[ii]).argmin()
# output.shape = shape
# return output
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Third Try xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# This version uses more memory because of the numpy broadcasting,
# but it is much faster.
shape = receivedData.shape
reshaped_received_data = receivedData.flatten()
constellation = np.reshape(self.symbols, [self.symbols.size, 1])
output = np.abs(constellation - reshaped_received_data).argmin(axis=0)
output.shape = shape
return output
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def calcTheoreticalSER(
self, SNR: NumberOrArray) -> NumberOrArray: # pragma: no cover
"""
Calculates the theoretical symbol error rate.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
SER : float | np.ndarray
The theoretical symbol error rate.
See also
--------
calcTheoreticalBER,
calcTheoreticalPER
Notes
-----
This function should be implemented in the derived classes
"""
raise NotImplementedError("calcTheoreticalSER: Not implemented")
def calcTheoreticalBER(
self, SNR: NumberOrArray) -> NumberOrArray: # pragma: no cover
"""
Calculates the theoretical bit error rate.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
BER : float | np.ndarray
The theoretical bit error rate.
See also
--------
calcTheoreticalSER,
calcTheoreticalPER
Notes
-----
This function should be implemented in the derived classes
"""
raise NotImplementedError("calcTheoreticalBER: Not implemented")
def calcTheoreticalPER(self, SNR: NumberOrArray,
packet_length: int) -> NumberOrArray:
"""
Calculates the theoretical package error rate.
A package is a group of bits, where if a single bit is in error
then the whole package is considered to be in error.
The package error rate (PER) is a direct mapping of the bit error
rate (BER), such that
.. math::
PER = 1 - (1 - BER)^{L}
where :math:`L` is the package_length.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
packet_length : int
The package length. That is, the number of bits in each
package.
Returns
-------
PER : float | np.ndarray
The theoretical package error rate.
See also
--------
calcTheoreticalBER,
calcTheoreticalSER
calcTheoreticalSpectralEfficiency
"""
BER = self.calcTheoreticalBER(SNR)
PER = 1 - ((1 - BER)**packet_length)
return PER
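    # Worked numeric example of the mapping above (hypothetical numbers): with
    # BER = 1e-3 and packet_length = 100, PER = 1 - (1 - 1e-3)**100, which is
    # roughly 0.095, i.e. about one packet in ten is lost.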
def calcTheoreticalSpectralEfficiency(
self,
SNR: NumberOrArray,
packet_length: Optional[int] = None) -> NumberOrArray:
"""
Calculates the theoretical spectral efficiency.
If there was no error in the transmission, the spectral efficiency
would be equal to the `K` property, that is, equal to the number of
bits represented by each symbol in the constellation. However, due
to bit errors the effective spectral efficiency will be lower.
The calcTheoreticalSpectralEfficiency method calculates the
effective spectral efficiency from the `K` property and the package
error rate (PER) for the given SNR and packet_length 'L', such that
.. math::
se = K * (1 - PER)
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
packet_length : int, optional
The package length. That is, the number of bits in each
package.
Returns
-------
se : float | np.ndarray
The theoretical spectral efficiency.
See also
--------
calcTheoreticalBER,
calcTheoreticalPER,
K
"""
if packet_length is None:
se = self.K * (1 - self.calcTheoreticalBER(SNR))
else:
se = self.K * (1 - self.calcTheoreticalPER(SNR, packet_length))
return se
# xxxxx End of Modulator Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx PSK Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class PSK(Modulator):
"""PSK Class
"""
def __init__(self, M: int, phaseOffset: float = 0) -> None:
"""Initializes the PSK object.
Parameters
----------
M : int
The modulation cardinality
phaseOffset: float, optional
A phase offset (in radians) to be applied to the PSK
constellation.
"""
super().__init__()
# Check if M is a power of 2
assert 2**math.log(M, 2) == M
# Generates the constellation
symbols = self._createConstellation(M, phaseOffset)
# Change to Gray mapping
# noinspection PyUnresolvedReferences
symbols = symbols[gray2binary(np.arange(0, M))]
self.setConstellation(symbols)
# noinspection PyUnresolvedReferences
@staticmethod
def _createConstellation(M: int, phaseOffset: float) -> np.ndarray:
"""Generates the Constellation for the PSK modulation scheme.
Parameters
----------
M : int
The modulation cardinality
phaseOffset: float
A phase offset (in radians) to be applied to the PSK
constellation.
Returns
-------
symbols : np.ndarray
The PSK constellation with the desired cardinality and phase
offset.
"""
phases = 2. * PI / M * np.arange(0, M) + phaseOffset
realPart = np.cos(phases)
imagPart = np.sin(phases)
        # Any value with magnitude below 1e-15 will be considered as 0
realPart[abs(realPart) < 1e-15] = 0
imagPart[abs(imagPart) < 1e-15] = 0
return realPart + 1j * imagPart
def setPhaseOffset(self, phaseOffset: float) -> None:
"""Set a new phase offset for the constellation
Parameters
----------
phaseOffset: float
A phase offset (in radians) to be applied to the PSK
constellation.
"""
self.setConstellation(self._createConstellation(self._M, phaseOffset))
# noinspection PyPep8
def calcTheoreticalSER(self, SNR: NumberOrArray) -> NumberOrArray:
"""Calculates the theoretical (approximation for high M and high
SNR) symbol error rate for the M-PSK scheme.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
SER : float | np.ndarray
The theoretical symbol error rate.
"""
snr = dB2Linear(SNR)
# $P_s \approx 2Q\left(\sqrt{2\gamma_s}\sin\frac{\pi}{M}\right)$
# Alternative formula (same result)
# $P_s = erfc \left ( \sqrt{\gamma_s} \sin(\frac{\pi}{M}) \right )$
ser = 2. * qfunc(np.sqrt(2. * snr) * math.sin(PI / self._M))
return ser
def calcTheoreticalBER(self, SNR: NumberOrArray) -> NumberOrArray:
"""Calculates the theoretical (approximation) bit error rate for
the M-PSK scheme using Gray coding.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
BER : float | np.ndarray
The theoretical bit error rate.
"""
# $P_b = \frac{1}{k}P_s$
# Number of bits per symbol
k = level2bits(self._M)
return 1.0 / k * self.calcTheoreticalSER(SNR)
# xxxxx End of PSK Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx QPSK Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class QPSK(PSK): # pragma: no cover
"""QPSK Class
"""
def __init__(self) -> None:
super().__init__(4, PI / 4.)
def __repr__(self) -> str: # pragma: no cover
"""
Get the string representation of the object.
Returns
-------
str
String representation of the object.
"""
return "QPSK object"
# xxxxx End of QPSK Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx BPSK Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class BPSK(Modulator):
"""BPSK Class
"""
def __init__(self) -> None:
super().__init__()
# The number "1" will be mapped to "-1" and the number "0" will be
# mapped to "1"
self.setConstellation(np.array([1, -1]))
@property
def name(self) -> str:
"""
Get the name property.
Returns
-------
str
The name of the modulator.
"""
return "{0:s}".format(self.__class__.__name__)
def __repr__(self) -> str: # pragma: no cover
"""
Get the string representation of the object.
Returns
-------
str
String representation of the object.
"""
return "BPSK object"
def calcTheoreticalSER(self, SNR: NumberOrArray) -> NumberOrArray:
"""
Calculates the theoretical (approximation) symbol error rate for
the BPSK scheme.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
SER : float | np.ndarray
The theoretical symbol error rate.
"""
snr = dB2Linear(SNR)
# $P_b = Q\left(\sqrt{\frac{2E_b}{N_0}}\right)$
# Alternative formula (same result)
# $P_b = \frac{1}{2}erfc \left ( \sqrt{\frac{E_b}{N_0}} \right )$
ser = qfunc(np.sqrt(2 * snr))
return ser
def calcTheoreticalBER(self, SNR: NumberOrArray) -> NumberOrArray:
"""
Calculates the theoretical (approximation) bit error rate for the
BPSK scheme.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
BER : float | np.ndarray
The theoretical bit error rate.
"""
return self.calcTheoreticalSER(SNR)
def modulate(self, inputData: np.ndarray) -> np.ndarray:
"""
Modulate the input data (decimal data).
Parameters
----------
inputData : np.ndarray
Data to be modulated.
Returns
-------
modulated_data : np.ndarray
The modulated data
Raises
------
ValueError
If inputData has any invalid value such as values greater
than self._M - 1. Note that inputData should not have
negative values but no check is done for this.
"""
# noinspection PyTypeChecker
if np.any(inputData > 1):
raise ValueError("Input data can only contains '0's and '1's")
return 1 - 2 * inputData
def demodulate(self, receivedData: np.ndarray) -> np.ndarray:
"""
Demodulate the data.
Parameters
----------
receivedData : np.ndarray
Data to be demodulated.
Returns
-------
demodulated_data : np.ndarray
The demodulated data.
"""
# noinspection PyUnresolvedReferences
return (receivedData < 0).astype(int)
# xxxxx End of BPSK Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx QAM Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class QAM(Modulator):
"""QAM Class
"""
def __init__(self, M: int) -> None:
"""Initializes the QAM object.
Parameters
----------
M : int
The modulation cardinality
Raises
------
ValueError
If M is not a square power of 2.
"""
super().__init__()
# Check if M is an even power of 2
power = math.log(M, 2)
if (power % 2 != 0) or (2**power != M):
raise ValueError("M must be a square power of 2")
symbols = self._createConstellation(M)
L = int(round(math.sqrt(M)))
grayMappingIndexes = self._calculateGrayMappingIndexQAM(L)
# noinspection PyUnresolvedReferences
symbols = symbols[grayMappingIndexes]
# Set the constellation
self.setConstellation(symbols)
@staticmethod
def _createConstellation(M: int) -> np.ndarray:
"""
Generates the Constellation for the (SQUARE) M-QAM modulation
scheme.
Parameters
----------
M : int
The modulation cardinality
Returns
-------
symbols : np.ndarray
The QAM constellation with the desired cardinality.
"""
# Size of the square. The square root is exact
symbols = np.empty(M, dtype=complex)
L = int(round(math.sqrt(M)))
for jj in range(0, L):
for ii in range(0, L):
symbol = complex(-(L - 1) + jj * 2, (L - 1) - ii * 2)
symbols[ii * L + jj] = symbol
average_energy = (M - 1) * 2.0 / 3.0
# Normalize the constellation, so that the mean symbol energy is
# equal to one.
return symbols / math.sqrt(average_energy)
@staticmethod
def _calculateGrayMappingIndexQAM(L: int) -> np.ndarray:
"""
Calculates the indexes that should be applied to the
constellation created by _createConstellation in order to
correspond to Gray mapping.
Notice that the square M-QAM constellation is a matrix of dimension
L x L, where L is the square root of M. Since the constellation was
generated without taking into account the Gray mapping, then we
need to reorder the generated constellation and this function
calculates the indexes that can be applied to the original
constellation in order to do exactly that.
As an example, for the 16-QAM modulation the indexes can be
organized (row order) in the matrix below
==== ====== ====== ====== ======
/ 00 01 11 10
==== ====== ====== ====== ======
00 0000 0001 0011 0010
01 0100 0101 0111 0110
11 1100 1101 1111 1110
10 1000 1001 1011 1010
==== ====== ====== ====== ======
This is equivalent to concatenate a Gray mapping for the row with a
Gray mapping for the column, and the corresponding indexes are
[0, 1, 3, 2, 4, 5, 7, 6, 12, 13, 15, 14, 8, 9, 11, 10]
Parameters
----------
L : int
Square root of the modulation cardinality (must be an integer).
Returns
-------
indexes : np.ndarray
indexes that should be applied to the constellation created by
_createConstellation in order to correspond to Gray mapping
"""
# Row vector with the column variation (second half of the index in
# binary form)
column = binary2gray(np.arange(0, L, dtype=int))
        # Column vector with the row variation (first half of the index in
        # binary form)
row = column.reshape(L, 1)
columns = np.tile(column, (L, 1))
rows = np.tile(row, (1, L))
# Shift the first part by half the number of bits and sum with the
# second part to form each element in the index matrix
index_matrix = (rows << (level2bits(L**2) // 2)) + columns
# Return the indexes as a vector (row order, which is the default
# in numpy)
return np.reshape(index_matrix, L**2)
# noinspection PyPep8
def _calcTheoreticalSingleCarrierErrorRate(
self, SNR: NumberOrArray) -> NumberOrArray:
"""
Calculates the theoretical (approximation) error rate of a single
carrier in the QAM system (QAM has two carriers).
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
Psc : float | np.ndarray
The theoretical single carrier error rate.
Notes
-----
This method is used in the :meth:`calcTheoreticalSER`
implementation.
See also
--------
calcTheoreticalSER
"""
snr = dB2Linear(SNR)
# Probability of error of each carrier in a square QAM
# $P_{sc} = 2\left(1 - \frac{1}{\sqrt M}\right)Q\left(\sqrt{\frac{3}{M-1}\frac{E_s}{N_0}}\right)$
sqrtM = np.sqrt(self._M)
Psc = (2. * (1. - (1. / sqrtM)) *
qfunc(np.sqrt(snr * 3. / (self._M - 1.))))
return Psc # type: ignore
def calcTheoreticalSER(self, SNR: NumberOrArray) -> NumberOrArray:
"""
Calculates the theoretical (approximation) symbol error rate for
the QAM scheme.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
SER : float | np.ndarray
The theoretical symbol error rate.
"""
Psc = self._calcTheoreticalSingleCarrierErrorRate(SNR)
# The SER is then given by
# $ser = 1 - (1 - Psc)^2$
ser = 1 - (1 - Psc)**2
return ser
def calcTheoreticalBER(self, SNR: NumberOrArray) -> NumberOrArray:
"""
Calculates the theoretical (approximation) bit error rate for
the QAM scheme.
Parameters
----------
SNR : float | np.ndarray
Signal-to-noise-value (in dB).
Returns
-------
BER : float | np.ndarray
The theoretical bit error rate.
"""
# For higher SNR values and gray mapping, each symbol error
# corresponds to approximately a single bit error. The BER is then
# given by the probability of error of a single carrier in the QAM
# system divided by the number of bits transported in that carrier.
k = level2bits(self._M)
Psc = self._calcTheoreticalSingleCarrierErrorRate(SNR)
ber = (2. * Psc) / k
return ber
# xxxxx End of QAM Class xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
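# A minimal sanity-check sketch of the classes above: a 16-QAM
# modulate/demodulate round trip plus one theoretical BER value (the 10 dB SNR
# is an arbitrary illustrative choice).
if __name__ == '__main__':  # pragma: no cover
    _qam = QAM(16)
    _indexes = np.random.randint(0, 16, size=20)
    assert np.array_equal(_qam.demodulate(_qam.modulate(_indexes)), _indexes)
    print(_qam.calcTheoreticalBER(10.0))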
| gpl-2.0 |
RRCKI/pilot | dataPilot.py | 1 | 34155 | #!/bin/env python
################################################################################
# Trivial version of SimplePilot to be used with pilotWrapper
#
# The purpose of this version is to have a wrapper/pilot set
# as less verbose as possible, just for fast tests.
#
################################################################################
import sys, os, time, urllib2, cgi, commands, re, getopt
import pUtil as utils
import pickle
import tempfile
import stat
import shutil
import myproxyUtils
import subprocess
#baseURLSSL = 'https://voatlas57.cern.ch:25443/server/panda'
baseURLSSL = 'https://pandaserver.cern.ch:25443/server/panda'
SC_TimeOut = 10
################################################################################
# A u x i l i a r y c l a s s e s
################################################################################
import threading
class ThreadHandler(threading.Thread):
def __init__(self, period, function, *args):
        threading.Thread.__init__(self) # setting up the engine to start the thread
self.__stopevent = threading.Event() # to stop the thread when needed
self.__function = function # function to be performed within the thread
self.__period = period # time between two calls to function
self.__args = args
def run(self):
"""executes actions periodically
"""
n = 0
while True:
time.sleep(1)
n += 1
if self.__stopevent.isSet():
# kills the thread
break
if n == self.__period:
self.__function(*self.__args)
n = 0
def stop(self):
"""kills the thread
"""
self.__stopevent.set()
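# Typical use of ThreadHandler (see the __main__ block at the bottom of this
# file): keep the Panda heartbeat alive every 30 minutes while the payload runs:
#     heartbeat = ThreadHandler(30*60, updateJob, PandaID)
#     heartbeat.start()
#     ...          # run the job
#     heartbeat.stop()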
class StageOptions(object):
# class to handle stage-in and stage-out options
def __init__(self):
self.pilotStageInSource = None
self.pilotStageInDestination = None
self.pilotStageInProtocol = None
self.pilotStageOutSource = None
self.pilotStageOutDestination = None
self.pilotStageOutProtocol = None
self.destinationURL = None
        # New variables for the Globus Online (GO) functions.
        # Stagein and Stageout are used to transfer standard files before and after job
        # execution.
        # OutputGO, WorkingendpointGO and Data are used to transfer the resulting data
        # files to a final gridFTP server.
self.StageinGOsrc = None
self.StageinGOdest = None
self.StageoutGOsrc = None
self.StageoutGOdest = None
self.OutputGO = None
self.WorkingendpointGO = None
self.Data = None
################################################################################
# A u x i l i a r y   f u n c t i o n s
################################################################################
#def search_destinationURL(opts):
# """inspects the string opts (jobParameters)
# searching for the string like
# --destinationURL=gsiftp://host:port/filename
# If that string exists, it is extracted from opts
# and the value is returned
# """
# destinationURL = None
# index = opts.find('--destinationURL')
# if index > -1:
# whitesp_index = opts.find(' ',index)
# if whitesp_index > -1:
# # the string is in the middle of opts
# destinationURL = opts[index : whitesp_index]
# else:
# # the string is at the end of opts
# destinationURL = opts[index : ]
# opts = opts.replace(destinationURL, '')
# destinationURL = destinationURL.split('=')[1]
# print '======== jobParameters after parsing for destinationURL'
# print 'opts = %s' %opts
# print 'destinationURL = %s' %destinationURL
# print
# return opts, destinationURL
def search_stage_opts(opts):
"""inspects the string opts (jobParameters)
searching for any stage in/out option
Strings are like:
--pilotStageInSource=<blah>
"""
stageopts = StageOptions()
list_opts = opts.split()
rest_opts = ''
for opt in list_opts:
if opt.startswith('--pilotStageInProtocol'):
stageopts.pilotStageInProtocol = opt.split('=')[1]
elif opt.startswith('--pilotStageInSource'):
stageopts.pilotStageInSource = opt.split('=')[1]
elif opt.startswith('--pilotStageInDestination'):
stageopts.pilotStageInDestination = opt.split('=')[1]
elif opt.startswith('--pilotStageOutProtocol'):
stageopts.pilotStageOutProtocol = opt.split('=')[1]
elif opt.startswith('--pilotStageOutSource'):
stageopts.pilotStageOutSource = opt.split('=')[1]
elif opt.startswith('--pilotStageOutDestination'):
stageopts.pilotStageOutDestination = opt.split('=')[1]
elif opt.startswith('--destinationURL'):
stageopts.destinationURL = opt.split('=')[1]
elif opt.startswith('--StageinGOsrc'):
stageopts.StageinGOsrc = opt.split('=')[1]
elif opt.startswith('--StageinGOdest'):
stageopts.StageinGOdest = opt.split('=')[1]
elif opt.startswith('--StageoutGOsrc'):
stageopts.StageoutGOsrc = opt.split('=')[1]
elif opt.startswith('--StageoutGOdest'):
stageopts.StageoutGOdest = opt.split('=')[1]
elif opt.startswith('--OutputGO'):
stageopts.OutputGO = opt.split('=')[1]
elif opt.startswith('--WorkingendpointGO'):
stageopts.WorkingendpointGO = opt.split('=')[1]
elif opt.startswith('--Data'):
stageopts.Data = opt.split('=')[1]
else:
rest_opts += ' ' + opt
return rest_opts, stageopts
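# Sketch of what search_stage_opts does with a jobParameters string (the
# endpoint and file names below are made up for illustration):
#     opts = "-i in.dat --StageinGOsrc=user#ep:/in.dat --WorkingendpointGO=user#wn --OutputGO=user#grid:/results"
#     rest_opts, stageopts = search_stage_opts(opts)
#     # rest_opts               -> ' -i in.dat'
#     # stageopts.StageinGOsrc  -> 'user#ep:/in.dat'
#     # stageopts.OutputGO      -> 'user#grid:/results'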
def list_stagein_cmds(stageopts):
list_sources = []
list_destinations = []
list_commands = []
if not stageopts.pilotStageInSource:
# no stage-in requested
return []
list_sources = stageopts.pilotStageInSource.split(',')
if stageopts.pilotStageInDestination:
list_destinations = stageopts.pilotStageInDestination.split(',')
else:
list_destinations = None
    if stageopts.pilotStageInProtocol:
list_protocols = stageopts.pilotStageInProtocol.split(',')
if len(list_protocols) != len(list_sources): # there was only one protocol
            list_protocols = list_protocols*len(list_sources)
else:
list_protocols = None
list_commands = map(stageincmd, list_sources, list_destinations, list_protocols)
return list_commands
def list_stageout_cmds(stageopts):
list_sources = []
list_destinations = []
list_commands = []
if not stageopts.pilotStageOutDestination:
# no stage-out requested
return []
list_destinations = stageopts.pilotStageOutDestination.split(',')
# VERY IMPORTANT NOTE: dest is first argument and src is the second one !!!
if stageopts.pilotStageOutSource:
list_sources = stageopts.pilotStageOutSource.split(',')
else:
list_sources = None
if stageopts.pilotStageOutProtocol:
list_protocols = stageopts.pilotStageOutProtocol.split(',')
if len(list_protocols) != len(list_sources): # there was only one protocol
list_protocols = list_protocols*len(list_sources)
else:
list_protocols = None
list_commands = map(stageoutcmd, list_destinations, list_sources, list_protocols)
return list_commands
def list_stagein_transfers(stageopts):
    """Creates the list of transfer tasks for stage-in.
"""
list_sources = []
list_destinations = []
list_transfers = []
list_endpoint = []
if not stageopts.StageinGOsrc:
# no stage-in requested
return []
list_sources = stageopts.StageinGOsrc.split(',')
#list_destinations = stageopts.StageinGOdest.split(',')
list_destinations = stageopts.WorkingendpointGO.split(',')
    # when there is more than one source file, repeat the destination list to build the right pairs
while len(list_sources) != len(list_destinations):
list_destinations.append(list_destinations[0])
list_transfers = map(stageincmdGO, list_sources, list_destinations)
return list_transfers
def list_stageout_transfers(stageopts):
    """Creates the list of transfer tasks for stage-out.
"""
list_sources = []
list_destinations = []
list_transfers = []
list_endpoint = []
if not stageopts.StageoutGOdest:
# no stage-out requested
return []
list_destinations = stageopts.StageoutGOdest.split(',')
list_sources = stageopts.StageoutGOsrc.split(',')
list_endpoint = stageopts.WorkingendpointGO.split(',')
while len(list_sources) != len(list_destinations):
list_destinations.append(list_destinations[0])
list_endpoint.append(list_endpoint[0])
list_transfers = map(stageoutcmdGO, list_destinations, list_sources, list_endpoint)
return list_transfers
def list_output_transfers(stageopts):
    """Creates the list of transfer tasks for the final output data.
"""
list_data = []
list_destinations = []
list_transfers = []
list_endpoint = []
if not stageopts.Data:
# no stage-out requested
return []
list_endpoint = stageopts.WorkingendpointGO.split(',')
list_destinations = stageopts.OutputGO.split(',')
list_data = stageopts.Data.split(',')
while len(list_data) != len(list_destinations):
list_destinations.append(list_destinations[0])
list_endpoint.append(list_endpoint[0])
list_transfers = map(outcmdGO, list_destinations, list_data, list_endpoint)
print list_transfers
return list_transfers
def getproxy(userID):
    """retrieves the user proxy credential from CERN's MyProxy server.
    Uses the submitter info obtained earlier from Panda.
    Code partially extracted from myproxyUtils.
"""
dn = userID
if dn.count('/CN=') > 1:
first_index = dn.find('/CN=')
second_index = dn.find('/CN=', first_index+1)
dn = dn[0:second_index]
arg = ['myproxy-logon','-s','myproxy.cern.ch','--no_passphrase','-l',dn]
print arg
subprocess.call(arg)
def outcmdGO(dest, src, endp):
"""create command for output file using Globus Online. Uses job directory information
to create output file path.
"""
#Activate the destination endpoint, a gridFTP server
destination = dest.split(':')[0]
arg = ['gsissh', 'cli.globusonline.org', 'endpoint-activate','-g', destination]
subprocess.call(arg)
#Create transfer task
cmd = 'gsissh cli.globusonline.org scp -v %s:%s/%s %s'%(endp,jobdir, src, dest)
return cmd
def stageincmdGO(src, dest):
"""create command for stage-in using Globus Online
"""
#Activate the endpoint at source
source = src.split(':')[0]
arg = ['gsissh', 'cli.globusonline.org', 'endpoint-activate','-g', source]
subprocess.call(arg)
#Create transfer task
cmd = 'gsissh cli.globusonline.org scp -v %s %s'%(src, dest)
return cmd
def stageoutcmdGO(dest, src, endp):
"""create command for stage-out using Globus Online
"""
#Activate the endpoint at destination
destination = dest.split(':')[0]
arg = ['gsissh', 'cli.globusonline.org', 'endpoint-activate','-g', destination]
subprocess.call(arg)
#Create transfer task
cmd = 'gsissh cli.globusonline.org scp -v %s %s'%(src, dest)
return cmd
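# The *GO helpers above only differ in which endpoint gets activated and in the
# use of the job directory. For example (endpoint/file names are illustrative):
#     stageincmdGO('user#gridftp:/input.txt', 'user#wn')
# activates the 'user#gridftp' endpoint and returns the transfer command
#     'gsissh cli.globusonline.org scp -v user#gridftp:/input.txt user#wn'
# while outcmdGO() additionally prefixes the source file with the pilot's job
# directory before handing it to Globus Online.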
def stageincmd(src, dest=None, protocol=None):
"""create command for stage-in
"""
# FIXME: check the format of src and dest
if protocol:
cmd = protocol
else:
# default value
cmd = 'globus-url-copy'
if dest:
cmd = cmd + ' %s %s' %(src, dest)
else:
filename = src.split('/')[-1]
cmd = cmd + ' %s file://${PWD}/%s' %(src, filename)
return cmd
def stageoutcmd(dest, src=None, protocol=None):
"""create command for stage-in
VERY IMPORTANT NOTE: dest is first argument and src is the second one !!!
"""
# FIXME: check the format of src and dest
if protocol:
cmd = protocol
else:
# default value
cmd = 'globus-url-copy'
if src:
cmd = cmd + ' %s %s' %(src, dest)
else:
filename = dest.split('/')[-1]
cmd = cmd + ' file://${PWD}/%s %s' %(filename, dest)
return cmd
def stageinwrapperGO(stageopts):
list_transfers = list_stagein_transfers(stageopts)
stageinfile = open('stagein.sh','w')
print >> stageinfile, '#!/bin/bash '
print >> stageinfile, ''
for cmd in list_transfers:
print >> stageinfile, cmd
stageinfile.close()
commands.getoutput('chmod +x stagein.sh')
def stageoutwrapperGO(stageopts):
list_transfers = list_stageout_transfers(stageopts)
stageoutfile = open('stageout.sh','w')
print >> stageoutfile, '#!/bin/bash '
print >> stageoutfile, ''
for cmd in list_transfers:
print >> stageoutfile, cmd
# preventing stageout.sh from being executed twice
# in case it is called from the transformation script
# by adding a lock file
print >> stageoutfile, 'touch ./stageoutlock'
stageoutfile.close()
commands.getoutput('chmod +x stageout.sh')
def outputwrapperGO(stageopts):
list_transfers = list_output_transfers(stageopts)
outputfile = open('output.sh','w')
print >> outputfile, '#!/bin/bash '
print >> outputfile, ''
for cmd in list_transfers:
print >> outputfile, cmd
outputfile.close()
commands.getoutput('chmod +x output.sh')
def stageinwrapper(stageopts):
list_cmds = list_stagein_cmds(stageopts)
stageinfile = open('stagein.sh','w')
print >> stageinfile, '#!/bin/bash '
print >> stageinfile, ''
for cmd in list_cmds:
print >> stageinfile, cmd
stageinfile.close()
commands.getoutput('chmod +x stagein.sh')
def stageoutwrapper(stageopts):
list_cmds = list_stageout_cmds(stageopts)
stageoutfile = open('stageout.sh','w')
print >> stageoutfile, '#!/bin/bash '
print >> stageoutfile, ''
for cmd in list_cmds:
print >> stageoutfile, cmd
# preventing stageout.sh from being executed twice
# in case it is called from the transformation script
# by adding a lock file
print >> stageoutfile, 'touch ./stageoutlock'
stageoutfile.close()
commands.getoutput('chmod +x stageout.sh')
#def gridFTP(fout, destinationURL):
# """if destinationURL is specified (is not None or NULL)
# the output file (fout) is staged-out with gridFTP
# """
# if destinationURL:
# cmd = 'globus-url-copy file://%s %s' %(fout, destinationURL)
# print
# print 'output file staged-out with gridFTP'
# print cmd
# print
# commands.getoutput(cmd)
def downloadGC():
"""Download Globus Connect file from Globus Online Server
"""
print "Downloading Globus Connect to local Filesystem:"
arg = ['curl','--connect-timeout','20','--max-time','120','-s','-S','http://confluence.globus.org/download/attachments/14516429/globusconnect','-o','globusconnect']
print arg
subprocess.call(arg)
def createEndpoint(endpoint):
    """Create the endpoint and return the one-time setup code.
"""
print "Inside createEndpoint"
print "Endpoint:"
print endpoint
arg = ['gsissh', 'cli.globusonline.org', 'endpoint-add','--gc', endpoint]
print arg
proc = subprocess.Popen(arg, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
return_code = proc.wait()
i = 0
for line in proc.stdout:
print line.rstrip()
i += 1
if i == 3:
pin = line
return pin
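# Note: createEndpoint() assumes the third line printed by "endpoint-add --gc"
# carries the one-time setup key; setupGC() below feeds that key to
# "globusconnect -setup".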
def setupGC(endpoint):
    """Install Globus Connect on the worker node. Creates the endpoint, gets the setup code and uses it in setup mode.
"""
print "inside setupGC"
pin = createEndpoint(endpoint)
pin = pin.strip()
arg = ['sh', 'globusconnect', '-setup', pin]
print arg
subprocess.call(arg)
def removeEndpoint(endpoint):
"""Remove Endpoint used by Globus Connect
"""
print "------Removing Endpoint--------"
arg = ['gsissh', 'cli.globusonline.org', 'endpoint-remove', endpoint]
print arg
subprocess.call(arg)
def removeGC():
"""Removes Globus Connect and configuration files on working node
"""
print "-----Removing Globus Connect Files-----"
arg = ['rm','-rf','~/.globusonline/','globusconnect']
print arg
subprocess.call(arg)
def startGC():
"""Start Globus Connect on working node
"""
print "-----Running Globus Connect------"
arg = ['sh', 'globusconnect', '-start']
print arg
subprocess.Popen(arg)
def stopGC():
"""Stop Globus Connect on working node
"""
print "----Stopping Globus Connect-----"
arg = ['sh', 'globusconnect', '-stop']
print arg
subprocess.call(arg)
def getQueueData():
"""get the info about the panda queue
"""
cmd = "curl --connect-timeout 20 --max-time 120 -sS 'http://panda.cern.ch:25880/server/pandamon/query?tpmes=pilotpars&queue=%s&siteid=%s'" %(qname, site)
# a list of queue specifications is retrieved from the panda monitor
# Not the entire schedconf is retrieved, only what is setup in
# /monitor/Controller.py (line 81)
queuedata = commands.getoutput(cmd)
return queuedata
def getqueueparameter(par):
    """ extract the value of parameter 'par' from the queuedata string """
queuedata = getQueueData()
matches = re.findall("(^|\|)([^\|=]+)=",queuedata)
for tmp,tmpPar in matches:
if tmpPar == par:
patt = "%s=(.*)" % par
idx = matches.index((tmp,tmpPar))
if idx+1 == len(matches):
patt += '$'
else:
patt += "\|%s=" % matches[idx+1][1]
mat = re.search(patt,queuedata)
if mat:
return mat.group(1)
else:
return ''
return ''
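# Illustrative queuedata format parsed above (keys and values are made up):
#     "appdir=/osg/app|datadir=/scratch|glexec=False|copytool=gridftp"
# With that string, getqueueparameter('glexec') would return 'False'.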
################################################################################
try:
site = os.environ['PANDA_SITE']
except:
site = ''
print "dataPilot.py: Running python from",sys.prefix,", platform", sys.platform, ", version", sys.version
try:
print "Python C API version", sys.api_version
except:
pass
jobdir = ''
grid = ''
batch = ''
logtgz = ''
sleeptime = 20
tpsvn = 'http://www.usatlas.bnl.gov/svn/panda/autopilot/trunk'
### command-line parameters
# First pull out pilotpars. Because getopt.getopt does not handle quoted params properly.
pat = re.compile('.*--pilotpars=(".*[^\\\]{1}").*')
optstr = ''
for a in sys.argv[1:]:
optstr += " %s" % a
mat = pat.match(optstr)
pilotpars = ''
if mat:
pilotpars = mat.group(1)
print "pilotpars: -->%s<--" % pilotpars
optstr = optstr.replace('--pilotpars=%s'%pilotpars,'')
optlist = optstr.split()
print "Options after removing pilotpars:",optlist
else:
optlist = sys.argv[1:]
# protect the option list from extraneous arguments slapped on by batch system,
# in particular when doing MPI submission
# reserve the lists, as we'll need those out of the loop scope
opts=[]
args=[]
valid = ("site=","tpid=","schedid=","pilotpars=","transurl=","queue=")
for opt in optlist:
try:
opt,arg = getopt.getopt( [opt], "", valid)
opts += opt
args += arg
except:
print "Warning: possible extraneous option skipped at index ", optlist.index(opt)
site = ''
tpid = '?'
schedid = '?'
transurl = ''
qname = ''
for o, a in opts:
if o == "--site":
site = a
if o == "--tpid":
tpid = a
if o == "--schedid":
schedid = a
if o == "--queue":
qname = a
if o == "--pilotpars":
pilotpars = a
if o == "--transurl":
transurl = a
if site != '':
print "Pilot will run with site identity", site
else:
site = 'TWTEST'
print "Pilot will run with default site identity", site
print "Running on queue", qname
## Check that curl is present
cmd = 'curl -V'
out = commands.getoutput(cmd)
if out.find('https') < 0:
print "dataPilot: ERROR: curl not found or not https-capable. Aborting."
print "!!FAILED!!2999!!trivialPilot: curl not found or not https-capable"
sys.exit(1)
host = commands.getoutput('hostname -f')
user = commands.getoutput('whoami')
workdir = ''
def getJob():
"""
Get a job from the Panda dispatcher
"""
global param
global glexec_flag
## Request job from dispatcher
print "==== Request Panda job from dispatcher"
# When glexec is True, then it is needed to read
# the value of 'credname' and 'myproxy' from the panda server
# This info is delivered only if getProxyKey is "TRUE"
# If glexec is needed or not in this site is known
# thru parameter 'glexec' included in queuedata.txt
glexec_flag = getqueueparameter('glexec').capitalize()
data = {
'siteName':site,
'node':host,
'prodSourceLabel':'user', #'test'
'computingElement':qname,
'getProxyKey':glexec_flag,
}
maxtries = 3
ntries = 0
status = 99
PandaID = 0
while status != 0:
ntries += 1
if ntries > maxtries:
break
# performs a loop while dispatcher has no job (status==20)
# or until the maxtime (10 min) passes
totaltime = 0
while totaltime < 10*60:
print 'trial after %s seconds' %totaltime
status, param, response = utils.toServer(baseURLSSL,'getJob',data,os.getcwd())
if status != 20:
break
time.sleep(60)
totaltime += 60
print 'results from getJob: '
print ' status = %s' %status
print ' param = %s' %param
print ' response = %s' %response
if status == 0:
print "==== Job retrieved:"
## Add VO as a param. Needs to go into schema. $$$
param['vo'] = 'OSG'
PandaID = param['PandaID']
for p in param:
print " %s=%s" % ( p, param[p] )
elif status == SC_TimeOut:
# allow more tries
print "Sleep for %s" % sleeptime
time.sleep(sleeptime)
else:
break
return status, PandaID
def runJob():
"""
Run the assigned job
"""
print "==== Running PandaID", param['PandaID']
if transurl != '':
# Take transformation URL from the pilot wrapper parameters
trf = transurl
else:
trf = param['transformation']
#if not trf.startswith('http'):
# print "Bad transformation field '%s': should be url." % trf
# print "!!FAILED!!2999!!Bad transformation field, should be url"
# sys.exit(1)
pid = param['PandaID']
# set up workdir
global cleanup
global workdir
global jobdir
global logtgz
if workdir != '':
jobdir = workdir+'/panda-'+pid
out = commands.getoutput('mkdir %s'%jobdir)
cleanup = True
else:
workdir = './'
jobdir = os.getcwd()
cleanup = False
print "Job directory is", jobdir
opts = param['jobPars']
#opts, destinationURL = search_destinationURL(opts)
opts, stageopts = search_stage_opts(opts)
#destinationURL = stageopts.destinationURL
#stageinwrapper(stageopts)
#stageoutwrapper(stageopts)
#Retrieve user proxy credential
userDN = param['prodUserID']
getproxy(userDN)
endpoint = stageopts.WorkingendpointGO
    print "Launching downloadGC function"
downloadGC()
time.sleep(10)
    print "launching setupGC function"
setupGC(endpoint)
print "Launching Globus Connect with -Start"
startGC()
time.sleep(30)
stageinwrapperGO(stageopts)
stageoutwrapperGO(stageopts)
outputwrapperGO(stageopts)
## Set environment variables for job use
env = "export PandaID=%s;" % pid
env += "export PandaSite=%s;" % site
env += "export QueueName=%s;" % qname
env += "export PandaWorkdir=%s;" % workdir
env += "export dispatchBlock=%s;" % param['dispatchDblock']
env += "export destinationBlock=%s;" % param['destinationDblock']
if param.has_key('swRelease'):
env += "export swRelease=%s;" % param['swRelease']
if param.has_key('homepackage'):
env += "export homepackage=%s;" % param['homepackage']
if param.has_key('logFile'):
env += "export logFile=%s;" % param['logFile']
logtgz = param['logFile']
if not os.environ.has_key("APP"):
if os.environ.has_key("OSG_APP"): env += "export APP=%s;" % os.environ["OSG_APP"]
elif os.environ.has_key("VO_ATLAS_SW_DIR"): env += "export APP=%s;" % os.environ["VO_ATLAS_SW_DIR"]
if trf.startswith('http'):
cmd = '%s cd %s; curl --insecure -s -S -o run.sh %s; chmod +x run.sh ' % ( env, jobdir, trf )
else:
# if the transformation is not an URL then it is the path to the executable
# in this case the run.sh script is created by hand just pointing to this path
cmd = '%s cd %s; echo "#!/bin/bash" > run.sh; echo "%s \$@" >> run.sh; chmod +x run.sh' %(env, jobdir, trf)
st, out = commands.getstatusoutput(cmd)
if st != 0:
print "!!FAILED!!2999!!Error in user job script setup"
print "ERROR in script setup"
print "Command:", cmd
print "Output:", out
return st
# Adding pilotpars, when it is not empty, to the options list
if pilotpars != '':
opts += ' --pilotpars=%s' %pilotpars
## -------------------------------------------------
## Run the job
## -------------------------------------------------
#### BEGIN TEST ####
#cmd = 'cd %s; %s ./run.sh %s' % ( jobdir, env, opts )
#cmd = 'cd %s; %s ./stagein.sh; ./run.sh %s; ./stageout.sh' % ( jobdir, env, opts )
# The script stageout.sh is executed only if there is no lock file.
# This lock file can be created by the script itself if it has been invoked previously
# from the transformation script
cmd = 'cd %s; %s ./stagein.sh; ./run.sh %s; [ ! -f ./stageoutlock ] && ./stageout.sh; ./output.sh' % ( jobdir, env, opts )
# FIXME
# in the future, instead of a monster cmd string,
# it should be in a wrapper script or any other solution
#### END TEST ####
# executing the job
print 'command to be executed is = %s' %cmd
global glexec_flag
out=''
if glexec_flag:
print "executing payload under glexec"
myproxy_server = param['myproxy']
MyPI = myproxyUtils.MyProxyInterface(myproxy_server)
MyPI.userDN = param['prodUserID']
MyPI.credname = param['credname']
glexec = myproxyUtils.executeGlexec(MyPI)
glexec.payload = cmd
glexec.opts = opts
glexec.execute()
st = glexec.status
out = glexec.output
else:
######## BEGIN TEST #######
#st, out = commands.getstatusoutput(cmd)
if 'LBNE_DAYA' in site: #FIXME: this is just a temporary solution !!
import subprocess
popen = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = ''
for line in iter(popen.stdout.readline, ""):
#print line # this is for the stdout
print line[:-1] # this is for the stdout, and removing the final \n
                out += line # this is to record it in a file (readline already keeps the trailing \n)
st = popen.wait()
else:
st, out = commands.getstatusoutput(cmd)
######## END TEST #######
# writing the output in job.out
fout = '%s/job.out'%jobdir
fh = open(fout,'w')
fh.write(out+'\n')
fh.close()
print "\n==== Job script output written to", fout
# If destinationURL was specified,
# stage-out the output file with gridFTP
#gridFTP(fout, destinationURL)
# ------------------------------------------------------
# analyzing the output searching for error messages
############ BEGIN TEST ##########
if 'LBNE_DAYA' in site: #FIXME: this is just a temporary solution !!
jobstat = analyzeoutput_dayabay(jobdir)
else:
jobstat = analyzeoutput_default(st, jobdir)
############ END TEST ##########
if jobstat == 0:
print "Final job status: success"
else:
print "Final job status: error code", jobstat
# ------------------------------------------------------
#print '\n======== Job script output:\n',out,'\n======== End of job script output\n'
stopGC()
time.sleep(20)
removeEndpoint(endpoint)
time.sleep(10)
removeGC()
return jobstat
def analyzeoutput_default(st, jobdir):
jobstat = 0
if st != 0:
print "ERROR: trivialPilot: Job script failed. Status=%s" % st
print "======== Job script run.sh content:"
print commands.getoutput('cat run.sh')
else:
print "!!INFO!!0!!Job script completed OK. Status=0"
errcodes = commands.getoutput("grep '!!FAILED!!' %s/job.out"%jobdir)
if len(errcodes) > 0:
        print "---- Synopsis of FAILED messages in job:"
print errcodes
warncodes = commands.getoutput("grep '!!WARNING!!' %s/job.out"%jobdir)
if len(warncodes) > 0:
print "---- Synposis of WARNING messages in job:"
print warncodes
infocodes = commands.getoutput("grep '!!INFO!!' %s/job.out"%jobdir)
if len(infocodes) > 0:
print "---- Synposis of INFO messages in job:"
print infocodes
pat = re.compile('.*!!FAILED!!([0-9]+)!!(.*)$')
mat = pat.search(errcodes)
if mat:
jobstat = 1
return jobstat
def analyzeoutput_dayabay(jobdir):
# possible error messages in the output of a dayabay job
# "ERROR":1000001 -- removed on David Jaffe's request on 20110513
errors_msgs = {"FATAL": 1000002,
"segmentation violation": 1000003,
"IOError": 1000004,
"ValueError": 1000005}
# command to search for any of those error messages in the output
cmd = 'egrep "%s" %s/job.out' %('|'.join(errors_msgs.keys()),jobdir)
errline = commands.getoutput(cmd)
if errline:
        # errline is not empty => at least one of the error messages was detected
print 'errline: ', errline
for err, code in errors_msgs.iteritems():
if err in errline:
# jobstat is the value corresponding with that key in the dictionary
# the key is the first of the error messages found
print 'err and code: ', err, code
return code
# if everything was fine...
return 0
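# Illustrative example (not part of the original pilot) of the egrep command
# assembled above, assuming jobdir=/tmp/panda_job (pattern order may vary with
# dict ordering):
#   egrep "FATAL|segmentation violation|IOError|ValueError" /tmp/panda_job/job.out
# The code mapped to the first matching message is returned; with no match the
# job is treated as successful and 0 is returned.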
def shutdown(jobstat):
"""
Report to dispatcher, clean up and shut down the pilot
"""
global param
pid = param['PandaID']
print "==== Cleanup for PandaID", pid
if cleanup:
cmd = 'rm -rf %s'%jobdir
print cmd
out = commands.getoutput(cmd)
## Report completion to dispatcher
if jobstat == 0:
state = 'finished'
else:
state = 'failed'
endJob(pid, state, jobstat)
def endJob(pid, state, jobstat):
data = {}
data['node'] = host
data['siteName'] = site
data['jobId'] = pid
data['schedulerID'] = schedid
data['pilotID'] = os.environ.get('GTAG', tpid)
data['state'] = state
data['timestamp'] = utils.timeStamp()
data['transExitCode'] = jobstat
data['computingElement'] = qname
print "== Updating Panda with completion info"
status, pars, response = utils.toServer(baseURLSSL,'updateJob',data,os.getcwd())
if status != 0:
print "Error contacting dispatcher to update job status: return value=%s" % status
else:
if jobstat == 0:
print "==== PandaID %s successful completion reported to dispatcher" % pid
print "!!FINISHED!!0!!PandaID %s done" % pid
else:
print "==== PandaID %s failed completion reported to dispatcher" % pid
print "!!FAILED!!2999!!PandaID %s done" % pid
print "==== Directory at job completion:"
print commands.getoutput('pwd; ls -al')
def updateJob(pid):
data = {}
data['node'] = host
data['siteName'] = site
data['jobId'] = pid
data['schedulerID'] = schedid
data['pilotID'] = os.environ.get('GTAG', tpid)
data['state'] = 'running'
data['timestamp'] = utils.timeStamp()
# update server
print "== Updating Panda with running state"
status, pars, response = utils.toServer(baseURLSSL,'updateJob',data,os.getcwd())
if status != 0:
print "Error contacting dispatcher to update job status: status=%s" % status
else:
print "==== PandaID %s running status reported to dispatcher" % pid
# ======================================================================
if __name__ == "__main__":
status, PandaID = getJob()
if status == 0:
status = updateJob(PandaID)
heartbeat = ThreadHandler(30*60, updateJob, PandaID)
heartbeat.start()
status = runJob()
heartbeat.stop()
shutdown(status)
| apache-2.0 |
skavulya/spark-tk | integration-tests/tests/test_frame_pandas.py | 12 | 1882 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setup import tc, rm, get_sandbox_path
from sparktk import dtypes
def test_frame_to_pandas_to_frame(tc):
"""
Tests going from a frame to a pandas df (to_pandas) and then back to a frame (import_pandas)
"""
# Create a frame from a csv file for testing
path = "../datasets/importcsvtest.csv"
frame1 = tc.frame.import_csv(path, header=True)
# bring to data frame and check the columns/types/row count
df = frame1.to_pandas()
assert(df.columns.tolist() == ['string_column', 'integer_column', 'float_column', 'datetime_column'])
assert([str(d) for d in df.dtypes] == ['object', 'int32', 'float64', 'datetime64[ns]'])
assert(frame1.count() == len(df))
# import the data frame back to a frame
frame2 = tc.frame.import_pandas(df, frame1.schema, validate_schema=True)
# compare this frame to the original frame
assert(len(frame1.schema) == len(frame2.schema))
for col1, col2 in zip(frame1.schema, frame2.schema):
assert(col1[0] == col2[0])
assert(dtypes.dtypes.get_from_type(col1[1]) == dtypes.dtypes.get_from_type(col2[1]))
assert(frame2.take(frame2.count()) == frame1.take(frame1.count()))
| apache-2.0 |
harshaneelhg/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
AndrewGYork/tools | pco.py | 1 | 49855 | import time
import ctypes as C
import os
import numpy as np
class Camera:
def __init__(self, verbose=True, very_verbose=False):
self.verbose = verbose
self.very_verbose = very_verbose
self.camera_handle = C.c_void_p(0)
if verbose: print("Opening pco camera...")
try:
# This command opens the next pco camera; if you want to
# have multiple cameras, and pick which one you're opening,
# I'd have to implement PCO_OpenCameraEx, which would
# require me to understand PCO_OpenStruct.
dll.open_camera(self.camera_handle, 0)
assert self.camera_handle.value is not None
except (WindowsError, AssertionError):
print("Failed to open pco camera.")
print(" *Is the camera on, and plugged into the computer?")
print(" *Is CamWare running? It shouldn't be!")
print(" *If using a CameraLink camera, is sc2_cl_me4.dll in",
"the same directory as SC2_Cam.dll?")
raise
self._get_camera_type()
if self.verbose: print(" pco.%s camera open." % self.camera_type)
dll.reset_settings_to_default(self.camera_handle)
self.disarm()
self._refresh_camera_setting_attributes()
return None
def close(self):
self.disarm()
if self.verbose: print("Closing pco.%s camera..." % self.camera_type)
dll.close_camera(self.camera_handle)
if self.verbose: print(" Camera closed.")
return None
def apply_settings(
self,
trigger='auto_trigger',
exposure_time_microseconds=2200,
region_of_interest={'left': 1,
'right': 2060,
'top': 1,
'bottom': 2048}
):
"""
* 'trigger' can be 'auto_trigger' or 'external_trigger' See the
comment block in _get_trigger_mode() for further details.
* 'exposure_time_microseconds' can be as low as 100 and as high
as 10000000.
* 'region_of_interest' will be adjusted to match the nearest
legal ROI that the camera supports. See _legalize_roi() for
details.
"""
if trigger is None:
trigger = self.trigger_mode
if exposure_time_microseconds is None:
exposure_time_microseconds = self.exposure_time_microseconds
if region_of_interest is None:
region_of_interest = self.roi
if self.armed: self.disarm()
if self.verbose: print("Applying settings to camera...")
# These settings matter, but we don't expose their functionality
# through apply_settings():
dll.reset_settings_to_default(self.camera_handle)
self._set_sensor_format('standard')
self._set_acquire_mode('auto')
try:
self._set_pixel_rate({'edge 4.2': 272250000,
'edge 4.2 bi': 46000000,
'edge 5.5': 286000000,
'pixelfly': 24000000, # Read from camera once
'panda 4.2': 44000000, # Read from camera once
}[self.camera_type])
except WindowsError:
# TODO we can remove when we don't have any pandas with older firmware
self._set_pixel_rate(0)
print('WARNING! Setting pixel rate to zero (for older versions of '
' panda firmware)')
# I think these settings don't matter for the pco.edge, but just
# in case...
self._set_storage_mode('recorder')
self._set_recorder_submode('ring_buffer')
# These settings change all the time:
self._set_trigger_mode(trigger)
self._set_exposure_time(exposure_time_microseconds)
self._set_roi(region_of_interest)
# It's good to check the camera health periodically. Now's as
# good a time as any, especially since the expected result is
# predictable: it should all be zeros.
camera_health = self._get_camera_health()
for k, v in camera_health.items():
if k == 'status' and self.camera_type == 'panda 4.2':
# TODO remove tolerance of status 16. This is only for older
# firmware versions of the panda.
assert v == 0 or v == 16, 'Status code %d' % v
else:
assert v == 0
return None
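    # Hedged usage sketch (not part of the original driver); the exposure and
    # ROI values below are arbitrary examples:
    #
    #   camera = Camera(verbose=True)
    #   camera.apply_settings(trigger='auto_trigger',
    #                         exposure_time_microseconds=5000,
    #                         region_of_interest={'left': 1, 'right': 2060,
    #                                             'top': 1, 'bottom': 2048})
    #   camera.arm(num_buffers=4)
    #   images = camera.record_to_memory(num_images=10)
    #   camera.close()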
def arm(self, num_buffers=None):
if not hasattr(self, '_default_num_buffers'):
self._default_num_buffers = 2
if num_buffers is None:
num_buffers = self._default_num_buffers
assert 1 <= num_buffers <= 16
self._default_num_buffers = num_buffers
if self.armed:
if self.verbose:
                print('Arm requested, but the pco camera '
'is already armed. Disarming...')
self.disarm()
if self.verbose: print("Arming camera...")
dll.arm_camera(self.camera_handle)
wXRes, wYRes, wXResMax, wYResMax = (
C.c_uint16(), C.c_uint16(), C.c_uint16(), C.c_uint16())
dll.get_sizes(self.camera_handle, wXRes, wYRes, wXResMax, wYResMax)
self.width, self.height = wXRes.value, wYRes.value
self.bytes_per_image = self.width * self.height * 2 #16 bit images
if self.very_verbose:
print(" Camera ROI dimensions:",
self.width, "(l/r) by", self.height, "(u/d)")
# Allocate buffers that the camera will use to hold images.
self.buffer_pointers = []
for i in range(num_buffers):
buffer_number = C.c_int16(-1)
self.buffer_pointers.append(C.POINTER(C.c_uint16)()) #Woo!
buffer_event = C.c_void_p(0)
dll.allocate_buffer(
self.camera_handle,
buffer_number,
self.bytes_per_image,
self.buffer_pointers[-1],
buffer_event)
assert buffer_number.value == i
if self.very_verbose:
print(" Buffer number ", i, " allocated, pointing to ",
self.buffer_pointers[-1].contents,
", linked to event ", buffer_event.value, sep='')
dll.set_image_parameters(self.camera_handle, self.width, self.height)
dll.set_recording_state(self.camera_handle, 1)
self.armed = True
if self.verbose: print(" Camera armed.")
# Add our allocated buffers to the camera's 'driver queue'
self.added_buffers = []
for buf_num in range(len(self.buffer_pointers)):
dll.add_buffer(
self.camera_handle,
0,
0,
buf_num,
self.width,
self.height,
16)
self.added_buffers.append(buf_num)
self._dll_status = C.c_uint32()
self._driver_status = C.c_uint32()
self._image_datatype = C.c_uint16 * self.width * self.height
return None
def disarm(self):
if not hasattr(self, 'armed'):
self.armed = False
if not self.armed:
if self.camera_type == 'pixelfly':
return None # pixelfly throws an error if disarmed twice
if self.verbose: print("Disarming camera...")
dll.set_recording_state(self.camera_handle, 0)
dll.cancel_images(self.camera_handle)
if hasattr(self, 'buffer_pointers'): #free allocated buffers
for buf in range(len(self.buffer_pointers)):
dll.free_buffer(self.camera_handle, buf)
self.buffer_pointers = []
self.armed = False
if self.verbose: print(" Camera disarmed.")
return None
def record_to_memory(
self,
num_images,
preframes=0,
out=None,
first_frame=0,
poll_timeout=5e5,
sleep_timeout=40,
first_trigger_timeout_seconds=0,
):
if not self.armed: self.arm()
# We'll store our images in a numpy array. Did the user provide
# one, or should we allocate one ourselves?
if out is None:
first_frame = 0
out = np.zeros((num_images - preframes, self.height, self.width),
'uint16')
out[:, 1, 1].fill(1) # For error checking empty images
return_value = out
else:
return_value = None # Output is placed in the 'out' array
try:
assert len(out.shape) == 3
assert (out.shape[0] - first_frame) >= (num_images - preframes)
assert (out.shape[1], out.shape[2]) == (self.height, self.width)
assert out.dtype == np.uint16
except AssertionError:
print("\nInput argument 'out' must have dimensions:")
print("(>=num_images - preframes, y-resolution, x-resolution)")
print(" and dtype='uint16'")
raise
except AttributeError:
print("\nInput argument 'out' must be a numpy array",
"(to hold our images)")
raise
# Try to record some images, and try to tolerate the many
# possible ways this can fail.
if self.verbose: print("Acquiring", num_images, "images...")
num_acquired = 0
for which_im in range(num_images):
# Hassle the camera until it gives us a buffer. The only
# ways we exit this 'while' loop are by getting a buffer or
# running out of patience.
self.num_polls = 0
self.num_sleeps = 0
start_time = time.perf_counter()
while True:
# Check if a buffer is ready
self.num_polls += 1
dll.get_buffer_status(
self.camera_handle,
self.added_buffers[0],
self._dll_status,
self._driver_status)
if self._dll_status.value == 0xc0008000:
buffer_number = self.added_buffers.pop(0)#Removed from queue
if self.very_verbose:
print(" After", self.num_polls, "polls and", self.num_sleeps,
"sleeps, buffer", buffer_number, "is ready.")
break
# The buffer isn't ready. How long should we wait to try
# again? For short exposures, we'd like to poll super
# frequently. For long exposures, we'll use time.sleep()
# to save CPU.
if self.exposure_time_microseconds > 30e3:
time.sleep(self.exposure_time_microseconds * 1e-6 * #seconds
2 / sleep_timeout) #Worst case
self.num_sleeps += 1
# At some point we have to admit we probably missed a
# trigger, and give up. Give up after too many polls
# (likely triggered by short exposures) or too many
# sleeps (likely triggered by long exposures)
if self.num_polls > poll_timeout or self.num_sleeps > sleep_timeout:
elapsed_time = time.perf_counter() - start_time
if which_im == 0: # First image; maybe keep waiting...
if elapsed_time < first_trigger_timeout_seconds:
continue
raise TimeoutError(
"After %i polls,"%(self.num_polls) +
" %i sleeps"%(self.num_sleeps) +
" and %0.3f seconds,"%(elapsed_time) +
" no buffer. (%i acquired)"%(num_acquired),
num_acquired=num_acquired)
try:
if self._driver_status.value == 0x0:
pass
elif self._driver_status.value == 0x80332028:
# Zero the rest of the buffer
out[max(0, first_frame + (which_im - preframes)):, :, :
].fill(0)
raise DMAError('DMA error during record_to_memory')
else:
print("Driver status:", self._driver_status.value)
raise UserWarning("Buffer status error")
if self.very_verbose:
print(" Record to memory result:",
hex(self._dll_status.value),
hex(self._driver_status.value))
if which_im >= preframes:
# http://stackoverflow.com/a/13481676
image = np.ctypeslib.as_array( #Temporary!
self._image_datatype.from_address(
C.addressof(
self.buffer_pointers[buffer_number].contents)))
out[first_frame + (which_im - preframes), :, :] = image
num_acquired += 1
finally:
dll.add_buffer(#Put the buffer back in the driver queue
self.camera_handle,
0,
0,
buffer_number,
self.width,
self.height,
16)
self.added_buffers.append(buffer_number)
if self.verbose: print("Done acquiring.")
return return_value
def _refresh_camera_setting_attributes(self):
"""
There are two ways to access a camera setting:
1. Ask the camera directly, using a self._get_*() - type method.
This interrogates the camera via a DLL call, updates the
relevant attribute(s) of the Edge object, and returns the
relevant value(s). This is slower, because you have to wait for
round-trip communication, but gets you up-to-date info.
2. Access an attribute of the camera object, e.g. self.roi
This ignores the camera, which is very fast, but the resulting
value could potentially be inconsistent with the camera's true
setting (although I hope it isn't!)
_refresh_camera_setting_attributes() is a convenience function
to update all the camera attributes at once. Call it if you're
nervous, I guess.
"""
if self.verbose: print("Retrieving settings from camera...")
self._get_camera_type()
self._get_timestamp_mode()
self._get_sensor_format()
self._get_trigger_mode()
self._get_storage_mode()
self._get_recorder_submode()
self._get_acquire_mode()
self._get_pixel_rate()
self._get_exposure_time()
self._get_roi()
self._get_temperature()
self._get_camera_health()
return None
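    # For example, self._get_exposure_time() does a DLL round trip and refreshes
    # self.exposure_time_microseconds, whereas reading the attribute
    # self.exposure_time_microseconds directly just reuses the value cached by
    # the last such call.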
def _get_timestamp_mode(self):
wTimeStamp = C.c_uint16(777) #777 is not an expected output
dll.get_timestamp_mode(self.camera_handle, wTimeStamp)
assert wTimeStamp.value in (0, 1, 2, 3) #wTimeStamp.value should change
mode_names = {0: "off",
1: "binary",
2: "binary+ASCII",
3: "ASCII"}
if self.verbose:
print(" Timestamp mode:", mode_names[wTimeStamp.value])
self.timestamp_mode = mode_names[wTimeStamp.value]
return self.timestamp_mode
def _set_timestamp_mode(self, mode='off'):
mode_numbers = {"off": 0,
"binary": 1,
"binary+ASCII": 2,
"ASCII": 3}
if self.verbose:
print(" Setting timestamp mode to:", mode)
dll.set_timestamp_mode(self.camera_handle, mode_numbers[mode])
assert self._get_timestamp_mode() == mode
return self.timestamp_mode
def _get_sensor_format(self):
wSensor = C.c_uint16(777) #777 is not an expected output
dll.get_sensor_format(self.camera_handle, wSensor)
assert wSensor.value in (0, 1) #wSensor.value should change
mode_names = {0: "standard", 1: "extended"}
if self.very_verbose:
print(" Sensor format:", mode_names[wSensor.value])
self.sensor_format = mode_names[wSensor.value]
return self.sensor_format
def _set_sensor_format(self, mode='standard'):
mode_numbers = {"standard": 0, "extended": 1}
if self.very_verbose:
print(" Setting sensor format to:", mode)
dll.set_sensor_format(self.camera_handle, mode_numbers[mode])
assert self._get_sensor_format() == mode
return self.sensor_format
def _get_camera_health(self):
dwWarn, dwErr, dwStatus = (
C.c_uint32(), C.c_uint32(), C.c_uint32())
dll.get_camera_health(self.camera_handle, dwWarn, dwErr, dwStatus)
if self.verbose:
print(" Camera health status:", end='')
print(" Warnings:", dwWarn.value, end='')
if dwWarn.value == 0:
print(" (good)", end='')
else:
print("***BAD***")
print(" / Errors:", dwErr.value, end='')
if dwErr.value == 0:
print(" (good)", end='')
else:
print("***BAD***")
print(" / Status:", dwStatus.value)
self.camera_health = {
'warnings': dwWarn.value,
'errors': dwErr.value,
'status': dwStatus.value}
return self.camera_health
def _get_temperature(self):
ccdtemp, camtemp, powtemp = (
C.c_int16(), C.c_int16(), C.c_int16())
dll.get_temperature(self.camera_handle, ccdtemp, camtemp, powtemp)
if self.verbose:
print(" Temperatures:",
"CCD", ccdtemp.value * 0.1, "C /",
"camera", camtemp.value, "C /",
"power supply", powtemp.value, "C ")
self.temperature = {
'ccd_temp': ccdtemp.value * 0.1,
'camera_temp': camtemp.value,
'power_supply_temp': powtemp.value}
return self.temperature
def _get_trigger_mode(self):
"""
        - 0x0000 = [auto trigger]:
        A new image exposure is automatically started, overlapped as well as
        possible with the readout of the previous image. If a CCD is used and the
images are taken in a sequence, then exposures and sensor readout
are started simultaneously. Signals at the trigger input (<exp
trig>) are irrelevant.
- 0x0001 = [software trigger]:
An exposure can only be started by a force trigger command.
- 0x0002 = [extern exposure & software trigger]:
A delay / exposure sequence is started at the RISING or FALLING
edge (depending on the DIP switch setting) of the trigger input
(<exp trig>).
- 0x0003 = [extern exposure control]:
The exposure time is defined by the pulse length at the trigger
input(<exp trig>). The delay and exposure time values defined by
the set/request delay and exposure command are ineffective.
(Exposure time length control is also possible for double image
mode; exposure time of the second image is given by the readout
time of the first image.)
"""
trigger_mode_names = {0: "auto_trigger",
1: "software_trigger",
2: "external_trigger",
3: "external_exposure"}
wTriggerMode = C.c_uint16()
dll.get_trigger_mode(self.camera_handle, wTriggerMode)
if self.verbose:
print(" Trigger mode:", trigger_mode_names[wTriggerMode.value])
self.trigger_mode = trigger_mode_names[wTriggerMode.value]
return self.trigger_mode
def _set_trigger_mode(self, mode="auto_trigger"):
trigger_mode_numbers = {
"auto_trigger": 0,
"software_trigger": 1,
"external_trigger": 2,
"external_exposure": 3}
if self.verbose: print(" Setting trigger mode to:", mode)
dll.set_trigger_mode(self.camera_handle, trigger_mode_numbers[mode])
assert self._get_trigger_mode() == mode
return self.trigger_mode
def _force_trigger(self):
assert self.trigger_mode in ('software_trigger', 'external_trigger')
wTriggerMode = C.c_uint16()
dll.force_trigger(self.camera_handle, wTriggerMode)
assert wTriggerMode.value in (0, 1)
return bool(wTriggerMode.value)
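    # Hedged sketch (not part of the original driver): with
    # apply_settings(trigger='software_trigger') one would arm the camera and
    # call self._force_trigger() once per desired exposure while
    # record_to_memory() waits for buffers (e.g. from another thread).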
def _get_storage_mode(self):
wStorageMode = C.c_uint16()
dll.get_storage_mode(self.camera_handle, wStorageMode)
storage_mode_names = {0: "recorder",
1: "FIFO_buffer"}
if self.very_verbose:
print(" Storage mode:", storage_mode_names[wStorageMode.value])
self.storage_mode = storage_mode_names[wStorageMode.value]
return self.storage_mode
def _set_storage_mode(self, mode="recorder"):
storage_mode_numbers = {"recorder": 0,
"FIFO_buffer": 1}
if self.very_verbose: print(" Setting storage mode to:", mode)
dll.set_storage_mode(self.camera_handle, storage_mode_numbers[mode])
assert self._get_storage_mode() == mode
return self.storage_mode
def _get_recorder_submode(self):
wRecSubmode = C.c_uint16(1)
dll.get_recorder_submode(self.camera_handle, wRecSubmode)
recorder_submode_names = {0: "sequence",
1: "ring_buffer"}
if self.very_verbose:
print(" Recorder submode:",
recorder_submode_names[wRecSubmode.value])
self.recorder_submode = recorder_submode_names[wRecSubmode.value]
return self.recorder_submode
def _set_recorder_submode(self, mode="ring_buffer"):
recorder_mode_numbers = {
"sequence": 0,
"ring_buffer": 1}
if self.very_verbose: print(" Setting recorder submode to:", mode)
dll.set_recorder_submode(
self.camera_handle, recorder_mode_numbers[mode])
assert self._get_recorder_submode() == mode
return self.recorder_submode
def _get_acquire_mode(self):
wAcquMode = C.c_uint16(0)
dll.get_acquire_mode(self.camera_handle, wAcquMode)
acquire_mode_names = {0: "auto",
1: "external_static",
2: "external_dynamic"}
if self.very_verbose:
print(" Acquire mode:", acquire_mode_names[wAcquMode.value])
self.acquire_mode = acquire_mode_names[wAcquMode.value]
return self.acquire_mode
def _set_acquire_mode(self, mode='auto'):
acquire_mode_numbers = {"auto": 0,
"external_static": 1,
"external_dynamic": 2}
if self.very_verbose: print(" Setting acquire mode to:", mode)
dll.set_acquire_mode(self.camera_handle, acquire_mode_numbers[mode])
assert self._get_acquire_mode() == mode
return self.acquire_mode
def _get_pixel_rate(self):
dwPixelRate = C.c_uint32(0)
dll.get_pixel_rate(self.camera_handle, dwPixelRate)
## TODO: Checking of the reported pixel rate could be greatly improved.
if self.camera_type == 'panda 4.2':
            # TODO: Older versions of the panda firmware report the pixel rate
            # as zero, so accept any reported value here. This branch can be
            # removed once we don't have any cameras with the older firmware.
            pass
else:
assert dwPixelRate.value != 0
if self.very_verbose: print(" Pixel rate:", dwPixelRate.value)
self.pixel_rate = dwPixelRate.value
return self.pixel_rate
def _set_pixel_rate(self, rate=272250000):
if self.very_verbose: print(" Setting pixel rate to:", rate)
dll.set_pixel_rate(self.camera_handle, rate)
assert self._get_pixel_rate() == rate
return self.pixel_rate
def _get_exposure_time(self):
dwDelay = C.c_uint32(0)
wTimeBaseDelay = C.c_uint16(0)
dwExposure = C.c_uint32(0)
wTimeBaseExposure = C.c_uint16(1)
dll.get_delay_exposure_time(
self.camera_handle,
dwDelay,
dwExposure,
wTimeBaseDelay,
wTimeBaseExposure)
time_base_mode_names = {0: "nanoseconds",
1: "microseconds",
2: "milliseconds"}
if self.verbose:
print(" Exposure:", dwExposure.value,
time_base_mode_names[wTimeBaseExposure.value])
if self.very_verbose:
print(" Delay:", dwDelay.value,
time_base_mode_names[wTimeBaseDelay.value])
self.exposure_time_microseconds = (
dwExposure.value * 10.**(3*wTimeBaseExposure.value - 3))
self.delay_time = dwDelay.value
return self.exposure_time_microseconds
def _set_exposure_time(self, exposure_time_microseconds=2200):
exposure_time_microseconds = int(exposure_time_microseconds)
if self.camera_type in ('edge 4.2', 'edge 5.5', 'pixelfly'):
assert 1e2 <= exposure_time_microseconds <= 1e7
elif self.camera_type == 'panda 4.2':
## TODO: is the minimum exposure really 9 us?
assert 9 <= exposure_time_microseconds <= 1e7
if self.verbose:
print(" Setting exposure time to", exposure_time_microseconds, "us")
dll.set_delay_exposure_time(
self.camera_handle, 0, exposure_time_microseconds, 1, 1)
self._get_exposure_time()
if self.camera_type == 'panda 4.2':
tolerance = 13
elif self.camera_type == 'edge 4.2 bi':
tolerance = 6
else:
tolerance = 0
assert abs(self.exposure_time_microseconds -
exposure_time_microseconds) <= tolerance
return self.exposure_time_microseconds
def _get_roi(self):
wRoiX0, wRoiY0, wRoiX1, wRoiY1 = (
C.c_uint16(), C.c_uint16(),
C.c_uint16(), C.c_uint16())
dll.get_roi(self.camera_handle, wRoiX0, wRoiY0, wRoiX1, wRoiY1)
if self.verbose:
print(" Camera ROI:");
print(" From pixel", wRoiX0.value, "to pixel", wRoiX1.value, "(left/right)")
print(" From pixel", wRoiY0.value, "to pixel", wRoiY1.value, "(up/down)")
self.roi = {
'left': wRoiX0.value,
'top': wRoiY0.value,
'right': wRoiX1.value,
'bottom': wRoiY1.value}
self.width = self.roi['right'] - self.roi['left'] + 1
self.height = self.roi['bottom'] - self.roi['top'] + 1
self.rolling_time_microseconds = self._calculate_rolling_time_us(
wRoiY0.value, wRoiY1.value)
return self.roi
def _calculate_rolling_time_us(self, y0, y1):
'''How long do we expect the chip to spend rolling, in microseconds?
Both the 4.2 and the 5.5 take ~10 ms to roll the full chip. Calculate
the fraction of the chip we're using and estimate the rolling
time.
'''
if self.camera_type == 'edge 4.2':
max_lines = 1024
full_chip_rolling_time = 1e4
elif self.camera_type == 'edge 5.5':
max_lines = 1080
full_chip_rolling_time = 1e4
elif self.camera_type in ('panda 4.2', 'edge 4.2 bi'):
max_lines = 1024
full_chip_rolling_time = 2.5e4 # TODO: verify rolling time for panda
# TODO: calculate rolling time for pixelfly...better
elif self.camera_type == 'pixelfly':
full_chip_rolling_time = 7.5e4
return full_chip_rolling_time
chip_fraction = max(y1 - max_lines, max_lines + 1 - y0) / max_lines
return full_chip_rolling_time * chip_fraction
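    # Worked example of the estimate above: for an 'edge 4.2', max_lines = 1024
    # and the full chip rolls in ~1e4 us. A full-height ROI (y0=1, y1=2048)
    # gives chip_fraction = max(2048-1024, 1025-1)/1024 = 1.0, i.e. ~10000 us;
    # a centered half-height ROI (y0=513, y1=1536) gives
    # chip_fraction = max(512, 512)/1024 = 0.5, i.e. ~5000 us.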
def _legalize_roi(self, roi):
"""This just calls the _legalize_roi function defined below.
Note that this method fills in some of the arguments for you.
"""
return legalize_roi(roi, self.camera_type, self.roi, self.verbose)
def _set_roi(self, region_of_interest):
roi = self._legalize_roi(region_of_interest)
dll.set_roi(self.camera_handle,
roi['left'], roi['top'], roi['right'], roi['bottom'])
assert self._get_roi() == roi
return self.roi
def _get_camera_type(self):
camera_name = C.c_char_p(b' '*40)
dll.get_camera_name(self.camera_handle, camera_name, 40)
name2type = {'pco.edge rolling shutter 4.2': 'edge 4.2',
'pco.edge 4.2 bi': 'edge 4.2 bi',
'pco.edge 5.5': 'edge 5.5', # This is probably wrong
'pco.panda 4.2': 'panda 4.2',
'pco.USB.Pixel.Fly': 'pixelfly'}
try:
self.camera_type = name2type[camera_name.value.decode('ascii')]
except KeyError:
raise UserWarning('Unexpected camera type - %s' % camera_name.value)
return self.camera_type
def legalize_roi(
roi,
camera_type='edge 4.2',
current_roi=None,
verbose=True):
"""
There are lots of ways a requested region of interest (ROI) can
be illegal. This utility function returns a nearby legal ROI.
Optionally, you can leave keys of 'roi' unspecified, and
_legalize_roi() tries to return reasonable choices based on
the values in current_roi.
"""
left = roi.get('left')
right = roi.get('right')
bottom = roi.get('bottom')
top = roi.get('top')
if verbose:
print(" Requested camera ROI:")
print(" From pixel", left, "to pixel", right, "(left/right)")
print(" From pixel", top, "to pixel", bottom, "(up/down)")
min_lr, min_ud = 1, 1
if camera_type == 'edge 4.2':
min_width, min_height = 40, 10
max_lr, max_ud, step_lr, = 2060, 2048, 20
elif camera_type == 'edge 4.2 bi':
min_width, min_height = 32, 16
max_lr, max_ud, step_lr, = 2048, 2048, 32
elif camera_type == 'edge 5.5':
min_width, min_height = 160, 10
max_lr, max_ud, step_lr = 2560, 2160, 160
elif camera_type == 'pixelfly':
min_width, min_height = 1392, 1040
max_lr, max_ud, step_lr = 1392, 1040, 1392
elif camera_type == 'panda 4.2':
# TODO min_width can be set to 32 when we upgrade the firmware on
# old pandas.
min_width, min_height = 192, 10
max_lr, max_ud, step_lr = 2048, 2048, 32
if current_roi is None:
current_roi = {'left': min_lr, 'right': max_lr,
'top': min_ud, 'bottom': max_ud}
# Legalize left/right
if left is None and right is None:
# User isn't trying to change l/r ROI; use existing ROI.
left, right = current_roi['left'], current_roi['right']
elif left is not None:
# 'left' is specified, 'left' is the master.
if left < min_lr: #Legalize 'left'
left = min_lr
elif left > max_lr - min_width + 1:
left = max_lr - min_width + 1
else:
left = 1 + step_lr*((left - 1) // step_lr)
if right is None: #Now legalize 'right'
right = current_roi['right']
if right < left + min_width - 1:
right = left + min_width - 1
elif right > max_lr:
right = max_lr
else:
right = left - 1 + step_lr*((right - (left - 1)) // step_lr)
else:
# 'left' is unspecified, 'right' is specified. 'right' is the master.
if right > max_lr: #Legalize 'right'
right = max_lr
elif right < min_lr - 1 + min_width:
right = min_width
else:
right = step_lr * (right // step_lr)
left = current_roi['left'] #Now legalize 'left'
if left > right - min_width + 1:
left = right - min_width + 1
elif left < min_lr:
left = min_lr
else:
left = right + 1 - step_lr * ((right - (left - 1)) // step_lr)
assert min_lr <= left < left + min_width - 1 <= right <= max_lr
# Legalize top/bottom
if top is None and bottom is None:
# User isn't trying to change u/d ROI; use existing ROI.
top, bottom = current_roi['top'], current_roi['bottom']
elif top is not None:
# 'top' is specified, 'top' is the master.
if top < min_ud: #Legalize 'top'
top = min_ud
if top > (max_ud - min_height)//2 + 1:
top = (max_ud - min_height)//2 + 1
bottom = max_ud - top + 1 #Now bottom is specified
else:
# 'top' is unspecified, 'bottom' is specified, 'bottom' is the master.
if bottom > max_ud: #Legalize 'bottom'
bottom = max_ud
if bottom < (max_ud + min_height)//2:
bottom = (max_ud + min_height)//2
top = max_ud - bottom + 1 #Now 'top' is specified
assert min_ud <= top < top + min_height - 1 <= bottom <= max_ud
new_roi = {'left': left, 'top': top, 'right': right, 'bottom': bottom}
if verbose and new_roi != roi:
print(" ***Requested ROI must be adjusted to match the camera***")
return new_roi
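# Hedged usage sketch (not part of the original module): shows how a partially
# specified ROI is snapped to a legal one; the requested values are arbitrary.
def _example_legalize_roi():
    # Only the vertical extent is requested; left/right fall back to current_roi.
    requested = {'top': 900, 'bottom': 1100}
    current = {'left': 1, 'right': 2060, 'top': 1, 'bottom': 2048}
    return legalize_roi(requested, camera_type='edge 4.2',
                        current_roi=current, verbose=False)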
def decode_timestamps(image_stack):
"""Decode PCO image timestamps from binary-coded decimal.
"""
assert len(image_stack.shape) == 3
assert image_stack.dtype == 'uint16'
timestamps = image_stack[:, 0, :14]
timestamps = (timestamps & 0x0F) + (timestamps >> 4) * 10
ts = {}
ts['image_number'] = np.sum(
timestamps[:, :4] * np.array((1e6, 1e4, 1e2, 1)),
axis=1, dtype='uint32')
ts['year'] = np.sum(
timestamps[:, 4:6] * np.array((1e2, 1)),
axis=1, dtype='uint32')
ts['month'] = timestamps[:, 6].astype('uint32')
ts['day'] = timestamps[:, 7].astype('uint32')
ts['microseconds'] = np.sum(
timestamps[:, 8:14] * np.array((3600e6, 60e6, 1e6, 1e4, 1e2, 1)),
axis=1, dtype='uint64')
return ts
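# Hedged usage sketch (not part of the original module): each timestamp byte
# stores two binary-coded-decimal digits, which is why decode_timestamps()
# computes (x & 0x0F) + (x >> 4) * 10. This assumes the camera's timestamp mode
# was set (e.g. to 'binary') before the stack was recorded.
def _example_decode_timestamps(image_stack):
    ts = decode_timestamps(image_stack)
    # Per-image frame counter and time of day in microseconds:
    return ts['image_number'], ts['microseconds']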
def reboot_camera():
""" Reboot the attached camera.
While this appears to work and recover the camera from a number
of error states, until recently, we always ran this as a
stand-alone script.
If you need to reboot the camera from within a script, the steps are:
* dll.reboot_camera
* dll.close_camera
* wait for reboot to complete
* dll.reset_dll
* dll.open_camera
"""
print('Connecting to camera...')
camera_handle = C.c_void_p(0)
dll.open_camera(camera_handle, 0)
print('Done connecting.')
print('Rebooting camera...')
dll.reboot_camera(camera_handle)
print('Done rebooting.')
print('Disconnecting from camera...', flush=True)
# We know the camera was just rebooted, so no need to check armed/recording
# state, just close it.
dll.close_camera(camera_handle)
print('Done disconnecting.')
print('Reconnecting to camera...', flush=True)
t0, timeout = time.perf_counter(), 10
while True:
# Reboot time is approximate, keep trying to open the camera until
        # we are successful or timeout has elapsed.
try:
dll.reset_dll()
dll.open_camera(camera_handle, 0)
except OSError as e:
if time.perf_counter() - t0 > timeout:
raise
time.sleep(0.2)
else:
dll.close_camera(camera_handle)
print('Done reconnecting.')
return
def pco_camera_child_process(
data_buffers,
buffer_shape,
input_queue,
output_queue,
commands,
):
"""For use with image_data_pipeline.py
Probably will be deprecated soon.
https://github.com/AndrewGYork/tools/blob/master/image_data_pipeline.py
Debugged for the edge 4.2, less so for the edge 5.5, pixelfly and panda 4.2.
"""
from image_data_pipeline import info, Q, sleep, clock
try:
import pco
except ImportError:
info("Failed to import pco.py; go get it from github:")
info("https://github.com/AndrewGYork/tools/blob/master/pco.py")
raise
buffer_size = np.prod(buffer_shape)
info("Initializing...")
camera = pco.Camera(verbose=False)
camera.apply_settings(trigger='auto_trigger')
camera.arm(num_buffers=3)
info("Done initializing")
preframes = 3
first_trigger_timeout_seconds = 0
status = 'Normal'
while True:
if commands.poll():
cmd, args = commands.recv()
info("Command received: " + cmd)
if cmd == 'apply_settings':
result = camera.apply_settings(**args)
camera.arm(num_buffers=3)
commands.send(result)
elif cmd == 'get_setting':
setting = getattr(
camera, args['setting'], 'unrecognized_setting')
commands.send(setting)
elif cmd == 'set_buffer_shape':
buffer_shape = args['shape']
buffer_size = np.prod(buffer_shape)
commands.send(buffer_shape)
elif cmd == 'get_status':
commands.send(status)
elif cmd == 'reset_status':
status = 'Normal'
commands.send(status)
elif cmd == 'get_preframes':
commands.send(preframes)
elif cmd == 'set_preframes':
preframes = args['preframes']
commands.send(preframes)
elif cmd == 'get_first_trigger_timeout_seconds':
commands.send(first_trigger_timeout_seconds)
elif cmd == 'set_first_trigger_timeout_seconds':
first_trigger_timeout_seconds = args[
'first_trigger_timeout_seconds']
commands.send(first_trigger_timeout_seconds)
elif cmd == 'force_trigger':
result = camera._force_trigger()
commands.send(result)
else:
info("Unrecognized command: " + cmd)
commands.send("unrecognized_command")
continue
try:
permission_slip = input_queue.get_nowait()
except Q.Empty:
sleep(0.001) #Non-deterministic sleep time :(
continue
if permission_slip is None: #This is how we signal "shut down"
output_queue.put(permission_slip)
break #We're done
else:
# Fill the data buffer with images from the camera
time_received = clock()
process_me = permission_slip['which_buffer']
## Trust IDP to request a legal num_slices
num_slices = permission_slip.get('num_slices', buffer_shape[0])
info("start buffer %i, acquiring %i frames and %i preframes"%(
process_me, num_slices, preframes))
with data_buffers[process_me].get_lock():
a = np.frombuffer(
data_buffers[process_me].get_obj(),
dtype=np.uint16)[:buffer_size].reshape(buffer_shape
)[:num_slices, :, :]
try:
camera.record_to_memory(
num_images=a.shape[0] + preframes,
preframes=preframes,
out=a,
first_trigger_timeout_seconds=(
first_trigger_timeout_seconds))
except pco.TimeoutError as e:
info('TimeoutError: %s'%(e.value))
status = 'TimeoutError'
#FIXME: we can do better, probably. Keep trying?
#Should we zero the remainder of 'a'?
except pco.DMAError:
info('DMAError')
status = 'DMAError'
else:
status = 'Normal'
info("end buffer %i, %06f seconds elapsed"%(
process_me, clock() - time_received))
output_queue.put(permission_slip)
camera.close()
return None
# A few types of exception we'll use during recording:
class TimeoutError(Exception):
def __init__(self, value, num_acquired=0):
self.value = value
self.num_acquired = num_acquired
def __str__(self):
return repr(self.value)
class DMAError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# DLL management
try:
dll_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'SC2_Cam.dll')
dll = C.oledll.LoadLibrary(dll_path)
except WindowsError:
print("Failed to load SC2_Cam.dll")
print("You need this to run pco.py")
raise
dll.get_error_text = dll.PCO_GetErrorText
dll.get_error_text.argtypes = [C.c_uint32,
C.c_char_p,
C.c_uint32]
def check_error(error_code):
if error_code == 0:
return 0
else:
error_description = C.c_char_p(b'*'*1000)
dll.get_error_text(error_code, error_description, 1000)
raise OSError(error_description.value.decode('ascii'))
dll.open_camera = dll.PCO_OpenCamera
dll.open_camera.argtypes = [C.POINTER(C.c_void_p), C.c_uint16]
dll.open_camera.restype = check_error
dll.close_camera = dll.PCO_CloseCamera
dll.close_camera.argtypes = [C.c_void_p]
dll.close_camera.restype = check_error
dll.arm_camera = dll.PCO_ArmCamera
dll.arm_camera.argtypes = [C.c_void_p]
dll.arm_camera.restype = check_error
dll.allocate_buffer = dll.PCO_AllocateBuffer
dll.allocate_buffer.argtypes = [
C.c_void_p,
C.POINTER(C.c_int16),
C.c_uint32,
C.POINTER(C.POINTER(C.c_uint16)),
C.POINTER(C.c_void_p)]
dll.allocate_buffer.restype = check_error
dll.add_buffer = dll.PCO_AddBufferEx
dll.add_buffer.argtypes = [
C.c_void_p,
C.c_uint32,
C.c_uint32,
C.c_int16,
C.c_uint16,
C.c_uint16,
C.c_uint16]
dll.add_buffer.restype = check_error
dll.get_buffer_status = dll.PCO_GetBufferStatus
dll.get_buffer_status.argtypes = [
C.c_void_p,
C.c_int16,
C.POINTER(C.c_uint32),
C.POINTER(C.c_uint32)]
dll.get_buffer_status.restype = check_error
dll.set_image_parameters = dll.PCO_CamLinkSetImageParameters
dll.set_image_parameters.argtypes = [C.c_void_p, C.c_uint16, C.c_uint16]
dll.set_image_parameters.restype = check_error
dll.set_recording_state = dll.PCO_SetRecordingState
dll.set_recording_state.argtypes = [C.c_void_p, C.c_uint16]
dll.set_recording_state.restype = check_error
dll.get_sizes = dll.PCO_GetSizes
dll.get_sizes.argtypes = [
C.c_void_p,
C.POINTER(C.c_uint16),
C.POINTER(C.c_uint16),
C.POINTER(C.c_uint16),
C.POINTER(C.c_uint16)]
dll.get_sizes.restype = check_error
dll.get_timestamp_mode = dll.PCO_GetTimestampMode
dll.get_timestamp_mode.argtypes = [C.c_void_p, C.POINTER(C.c_uint16)]
dll.get_timestamp_mode.restype = check_error
dll.get_sensor_format = dll.PCO_GetSensorFormat
dll.get_sensor_format.argtypes = [C.c_void_p, C.POINTER(C.c_uint16)]
dll.get_sensor_format.restype = check_error
dll.get_camera_health = dll.PCO_GetCameraHealthStatus
dll.get_camera_health.argtypes = [
C.c_void_p,
C.POINTER(C.c_uint32),
C.POINTER(C.c_uint32),
C.POINTER(C.c_uint32)]
dll.get_camera_health.restype = check_error
dll.get_temperature = dll.PCO_GetTemperature
dll.get_temperature.argtypes = [
C.c_void_p,
C.POINTER(C.c_int16),
C.POINTER(C.c_int16),
C.POINTER(C.c_int16)]
dll.get_temperature.restype = check_error
dll.get_trigger_mode = dll.PCO_GetTriggerMode
dll.get_trigger_mode.argtypes = [C.c_void_p, C.POINTER(C.c_uint16)]
dll.get_trigger_mode.restype = check_error
dll.get_storage_mode = dll.PCO_GetStorageMode
dll.get_storage_mode.argtypes = [C.c_void_p, C.POINTER(C.c_uint16)]
dll.get_storage_mode.restype = check_error
dll.get_recorder_submode = dll.PCO_GetRecorderSubmode
dll.get_recorder_submode.argtypes = [C.c_void_p, C.POINTER(C.c_uint16)]
dll.get_recorder_submode.restype = check_error
dll.get_acquire_mode = dll.PCO_GetAcquireMode
dll.get_acquire_mode.argtypes = [C.c_void_p, C.POINTER(C.c_uint16)]
dll.get_acquire_mode.restype = check_error
dll.get_pixel_rate = dll.PCO_GetPixelRate
dll.get_pixel_rate.argtypes = [C.c_void_p, C.POINTER(C.c_uint32)]
dll.get_pixel_rate.restype = check_error
dll.set_pixel_rate = dll.PCO_SetPixelRate
dll.set_pixel_rate.argtypes = [C.c_void_p, C.c_uint32]
dll.set_pixel_rate.restype = check_error
dll.get_delay_exposure_time = dll.PCO_GetDelayExposureTime
dll.get_delay_exposure_time.argtypes = [
C.c_void_p,
C.POINTER(C.c_uint32),
C.POINTER(C.c_uint32),
C.POINTER(C.c_uint16),
C.POINTER(C.c_uint16)]
dll.get_delay_exposure_time.restype = check_error
dll.set_delay_exposure_time = dll.PCO_SetDelayExposureTime
dll.set_delay_exposure_time.argtypes = [
C.c_void_p,
C.c_uint32,
C.c_uint32,
C.c_uint16,
C.c_uint16]
dll.set_delay_exposure_time.restype = check_error
dll.get_roi = dll.PCO_GetROI
dll.get_roi.argtypes = [
C.c_void_p,
C.POINTER(C.c_uint16),
C.POINTER(C.c_uint16),
C.POINTER(C.c_uint16),
C.POINTER(C.c_uint16)]
dll.get_roi.restype = check_error
dll.set_roi = dll.PCO_SetROI
dll.set_roi.argtypes = [
C.c_void_p,
C.c_uint16,
C.c_uint16,
C.c_uint16,
C.c_uint16]
dll.set_roi.restype = check_error
dll.get_camera_name = dll.PCO_GetCameraName
dll.get_camera_name.argtypes = [
C.c_void_p,
C.c_char_p,
C.c_uint16]
dll.get_camera_name.restype = check_error
dll.reset_settings_to_default = dll.PCO_ResetSettingsToDefault
dll.reset_settings_to_default.argtypes = [C.c_void_p]
dll.reset_settings_to_default.restype = check_error
dll.set_recording_state = dll.PCO_SetRecordingState
dll.set_recording_state.argtypes = [C.c_void_p, C.c_uint16]
dll.set_recording_state.restype = check_error
dll.remove_buffer = dll.PCO_RemoveBuffer
dll.remove_buffer.argtypes = [C.c_void_p]
dll.remove_buffer.restype = check_error
dll.cancel_images = dll.PCO_CancelImages
dll.cancel_images.argtypes = [C.c_void_p]
dll.cancel_images.restype = check_error
dll.free_buffer = dll.PCO_FreeBuffer
dll.free_buffer.argtypes = [C.c_void_p, C.c_int16]
dll.free_buffer.restype = check_error
dll.set_timestamp_mode = dll.PCO_SetTimestampMode
dll.set_timestamp_mode.argtypes = [C.c_void_p, C.c_uint16]
dll.set_timestamp_mode.restype = check_error
dll.set_sensor_format = dll.PCO_SetSensorFormat
dll.set_sensor_format.argtypes = [C.c_void_p, C.c_uint16]
dll.set_sensor_format.restype = check_error
dll.set_trigger_mode = dll.PCO_SetTriggerMode
dll.set_trigger_mode.argtypes = [C.c_void_p, C.c_uint16]
dll.set_trigger_mode.restype = check_error
dll.force_trigger = dll.PCO_ForceTrigger
dll.force_trigger.argtypes = [C.c_void_p, C.POINTER(C.c_uint16)]
dll.force_trigger.restype = check_error
dll.set_recorder_submode = dll.PCO_SetRecorderSubmode
dll.set_recorder_submode.argtypes = [C.c_void_p, C.c_uint16]
dll.set_recorder_submode.restype = check_error
dll.set_acquire_mode = dll.PCO_SetAcquireMode
dll.set_acquire_mode.argtypes = [C.c_void_p, C.c_uint16]
dll.set_acquire_mode.restype = check_error
dll.set_storage_mode = dll.PCO_SetStorageMode
dll.set_storage_mode.argtypes = [C.c_void_p, C.c_uint16]
dll.set_storage_mode.restype = check_error
dll.reboot_camera = dll.PCO_RebootCamera
dll.reboot_camera.argtypes = [C.c_void_p]
dll.reset_dll = dll.PCO_ResetLib
dll.reset_dll.restype = check_error
if __name__ == '__main__':
camera = Camera(verbose=True, very_verbose=True)
# Half-assed edge testing; give randomized semi-garbage inputs, hope
# the plane don't crash.
blank_frames = 0
for i in range(10000):
# Random exposure time, biased towards shorter exposures
exposure = min(np.random.randint(1e2, 1e7, size=40))
# Random ROI, potentially with some/all limits unspecified.
roi = {
'top': np.random.randint(low=-2000, high=3000),
'bottom': np.random.randint(low=-2000, high=3000),
'left': np.random.randint(low=-2000, high=3000),
'right': np.random.randint(low=-2000, high=3000)}
# Delete some keys/vals
roi = {k: v for k, v in roi.items() if v > -10}
camera.apply_settings(exposure_time_microseconds=exposure,
region_of_interest=roi)
num_buffers = np.random.randint(1, 16)
camera.arm(num_buffers=num_buffers)
print("Allocating memory...")
images = np.zeros((np.random.randint(1, 5),
camera.height,
camera.width),
dtype=np.uint16)
print("Done allocating memory.")
print("Expected time:",
images.shape[0] *
1e-6 * max(camera.rolling_time_microseconds,
camera.exposure_time_microseconds))
start = time.perf_counter()
camera.record_to_memory(num_images=images.shape[0], out=images,
first_trigger_timeout_seconds=5)
print("Elapsed time:", time.perf_counter() - start)
print(images.min(axis=(1, 2)), images.max(axis=(1, 2)),
images.shape)
if not 0 < images.min() < images.max():
blank_frames += 1
print('Blank frame received (%d total)' % blank_frames)
camera.disarm()
print("%d blank frames received from %s during test" % (blank_frames,
camera_to_test))
camera.close()
| gpl-2.0 |
PX4/fw-test-harness | fw_test_harness/simulator.py | 2 | 11060 | #!/usr/bin/env python
from __future__ import print_function, division, absolute_import, \
unicode_literals
import os
import sys
from jsbsim import FGFDMExec
import matplotlib.pyplot as plt
import argparse
from pint import UnitRegistry
from html_report_generator import HtmlReportGenerator
from plots import add_plots
from fixedwing_controller import FixedWingController
import pyprind
import random
import copy
from analysis import analyse
ureg = UnitRegistry()
class Simulator:
"""Simulate mtecs"""
def __init__(self, args):
"""Constructor"""
self.args = args
self.fdm = FGFDMExec(root_dir=args["jsbsim_root"])
self.fdm.load_model("Rascal110-JSBSim")
# settings
self.sim_end_time_s = 120
self.dt = 0.005
self.dt_total_energy = 0.02
self.ic = {
"hgt": 400 * ureg.meter
}
# self.mode = "attitude"
self.mode = "position"
self.noise_enabled = True
self.sigmas = {
"airspeed": 5.0,
"altitude": 2.0,
"speed_body_u": 1.0, # TODO think again about introducing the noise in the body frame
"speed_body_v": 1.0,
"speed_body_w": 1.0,
}
self.parameters = {
"airspeed_trim": 20.0,
"airspeed_min": 7.0,
"airspeed_max": 60.0,
"coordinated_min_speed": 1000.0,
"coordinated_method": 0.0,
"att_tc": 0.5,
"k_p": 0.08,
"k_ff": 0.4,
"k_i": 0.05,
"i_max": 0.4,
"pitch_max_rate_pos": 0.0, # 0: disable
"pitch_max_rate_neg": 0.0, # 0: disable
"pitch_roll_ff": 0.0,
"throttle_default": 0.2,
"mtecs_acc_p": 0.01,
"mtecs_fpa_p": 0.01,
"mtecs_throttle_ff": 0.0,
"mtecs_throttle_p": 0.1,
"mtecs_throttle_i": 0.25,
"mtecs_pitch_ff": 0.0,
"mtecs_pitch_p": 0.1,
"mtecs_pitch_i": 0.03,
"mtecs_airspeed_lowpass_cutoff": 0.1,
"mtecs_airspeed_derivative_lowpass_cutoff": 0.1,
"mtecs_altitude_lowpass_cutoff": 0.1,
"mtecs_flightpathangle_lowpass_cutoff": 0.1,
}
self.control_surface_scaler = 1.0
self.controller = FixedWingController(self.parameters, self.dt_total_energy/self.dt, self.mode)
def init_sim(self):
"""init/reset simulation"""
# init states (dictionary of lists (each list contains a time series of
# a state/value))
self.jsbs_states = {
"ic/gamma-rad": [0],
"position/h-sl-meters": [self.ic["hgt"].magnitude],
"attitude/phi-rad": [0],
"velocities/p-rad_sec": [0],
"attitude/theta-rad": [0],
"velocities/q-rad_sec": [0],
"attitude/psi-rad": [0],
"velocities/r-rad_sec": [0],
"velocities/u-fps": [0],
"velocities/v-fps": [0],
"velocities/w-fps": [0],
"accelerations/udot-ft_sec2": [0],
"accelerations/vdot-ft_sec2": [0],
"accelerations/wdot-ft_sec2": [0],
"velocities/vt-fps": [ureg.Quantity(self.parameters["airspeed_trim"], "m/s").to(ureg["ft/s"]).magnitude], # XXX is this true airspeed, check...
"flight-path/gamma-rad": [0],
"propulsion/engine/thrust-lbs": [0]
}
self.jsbs_ic = {
"ic/h-sl-ft": [self.ic["hgt"].to(ureg.foot).magnitude],
"ic/vt-kts": [ureg.Quantity(self.parameters["airspeed_trim"], "m/s").to(ureg["kt"]).magnitude], # XXX is this true airspeed, check...
"ic/gamma-rad": [0],
}
self.jsbs_inputs = {
"fcs/aileron-cmd-norm": [0],
"fcs/elevator-cmd-norm": [0],
"fcs/rudder-cmd-norm": [0],
"fcs/throttle-cmd-norm": [0.0],
"fcs/mixture-cmd-norm": [0.87],
"propulsion/magneto_cmd": [3],
"propulsion/starter_cmd": [1]
}
self.sim_states = {
"t": [0.0],
}
self.setpoints = {}
self.update_setpoints(0)
self.noisy_states = {}
self.update_noisy_states(self.get_state())
self.control_data_log = {}
# set initial conditions and trim
for k, v in self.jsbs_ic.items():
self.fdm.set_property_value(k, v[0])
self.fdm.set_dt(self.dt)
self.fdm.reset_to_initial_conditions(0)
self.fdm.do_trim(0)
def get_state(self):
"""
creates a dictionary of the current state, to be used as control input
"""
x = {}
x["t"] = self.sim_states["t"][-1]
x["roll"] = self.jsbs_states["attitude/phi-rad"][-1]
x["roll_rate"] = self.jsbs_states["velocities/p-rad_sec"][-1]
x["pitch"] = self.jsbs_states["attitude/theta-rad"][-1]
x["pitch_rate"] = self.jsbs_states["velocities/q-rad_sec"][-1]
x["yaw"] = self.jsbs_states["attitude/psi-rad"][-1]
x["yaw_rate"] = self.jsbs_states["velocities/r-rad_sec"][-1]
x["speed_body_u"] = ureg.Quantity(
self.jsbs_states["velocities/u-fps"][-1],
"ft/s").to(ureg["m/s"]).magnitude
x["speed_body_v"] = ureg.Quantity(
self.jsbs_states["velocities/v-fps"][-1],
"ft/s").to(ureg["m/s"]).magnitude
x["speed_body_w"] = ureg.Quantity(
self.jsbs_states["velocities/w-fps"][-1],
"ft/s").to(ureg["m/s"]).magnitude
x["acc_body_x"] = self.jsbs_states["accelerations/udot-ft_sec2"][-1]
x["acc_body_y"] = self.jsbs_states["accelerations/vdot-ft_sec2"][-1]
x["acc_body_z"] = self.jsbs_states["accelerations/wdot-ft_sec2"][-1]
x["airspeed"] = ureg.Quantity(
self.jsbs_states["velocities/vt-fps"][-1],
"ft/s").to(ureg["m/s"]).magnitude
x["altitude"] = self.jsbs_states["position/h-sl-meters"][-1]
x["flightpathangle"] = self.jsbs_states["flight-path/gamma-rad"][-1]
# additonal/secondary data that is not a state in the physical sense but is needed
# by the controller and describes the aircraft state as well:
if x["airspeed"] > self.parameters["airspeed_min"]:
x["scaler"] = self.parameters["airspeed_trim"] / x["airspeed"]
else:
x["scaler"] = self.parameters["airspeed_trim"] \
/ self.parameters["airspeed_min"]
x["lock_integrator"] = False
return x
def calc_setpoints(self, time):
"""Generate setpoint to be used in the controller"""
r = {}
r["roll"] = 0.0
r["pitch"] = 0.0
r["yaw"] = 0.0
r["roll_rate"] = 0.0
r["pitch_rate"] = 0.0
r["yaw_rate"] = 0.0
r["altitude"] = self.ic["hgt"].magnitude if time < 20 else self.ic["hgt"].magnitude + 10
# r["altitude"] = self.ic["hgt"].magnitude
r["velocity"] = self.parameters["airspeed_trim"]
return r
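    # Hedged sketch (not part of the original harness): an alternative setpoint
    # profile could command an airspeed step instead of the altitude step, e.g.
    #
    #   r["altitude"] = self.ic["hgt"].magnitude
    #   r["velocity"] = self.parameters["airspeed_trim"] + (5.0 if time > 20 else 0.0)
    #
    # which exercises speed tracking instead of altitude tracking.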
def update_setpoints(self, time):
"""updates the setpoint"""
sp = self.calc_setpoints(time)
for k, v in sp.items():
self.setpoints.setdefault(k,[]).append(v)
def step(self):
"""Perform one simulation step
        implementation is according to FGFDMExec's own simulate but we don't
want to move the parameters in and out manually
"""
# control
# self.jsbs_inputs["fcs/elevator-cmd-norm"].append(0.01 * (400 -
# self.jsbs_states["position/h-sl-meters"][-1]))
self.update_setpoints(self.fdm.get_sim_time())
state = self.get_state()
self.update_noisy_states(state)
state_estimate = self.apply_noise(state) # estimate is simulated as true state plus gaussian noise
u, control_data = self.controller.control(state=state_estimate,
setpoint={k: v[-1] for k, v in self.setpoints.items()},
parameters = self.parameters)
self.jsbs_inputs["fcs/aileron-cmd-norm"].append(u[0] * self.control_surface_scaler)
self.jsbs_inputs["fcs/elevator-cmd-norm"].append(-u[1] * self.control_surface_scaler)
self.jsbs_inputs["fcs/rudder-cmd-norm"].append(u[2] * self.control_surface_scaler)
self.jsbs_inputs["fcs/throttle-cmd-norm"].append(u[3])
        # copy control data for later plotting
for k,v in control_data.items():
self.control_data_log.setdefault(k, [0.0]).append(v)
        # pass the control result to jsbsim
for k, v in self.jsbs_inputs.items():
self.fdm.set_property_value(k, v[-1])
# do one step in jsbsim
self.fdm.run()
# read out result from jsbsim
for k, v in self.jsbs_states.items():
self.jsbs_states[k].append(self.fdm.get_property_value(k))
return self.fdm.get_sim_time()
def output_results(self):
"""Generate a report of the simulation"""
rg = HtmlReportGenerator(self.args)
add_plots(self, rg) # change add_plots to show different plots!
rg.variables.update(analyse(self))
rg.generate()
rg.save()
print("Report saved to {0}".format(self.args["filename_out"]))
def apply_noise(self, state):
"""replaces entries in state with the noisy data (for states for which noise data exists)"""
state_estimate = copy.copy(state)
for k,v in self.noisy_states.items():
state_estimate[k] = self.noisy_states[k][-1]
return state_estimate
def update_noisy_states(self, state):
"""caclculate noisy version of state for which noise data exists"""
for k, v in self.sigmas.items():
self.noisy_states.setdefault(k,[]).append(state[k] + random.gauss(0,v))
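    # Measurement model applied above, per channel k with sigma_k from
    # self.sigmas:
    #   z_k(t) = x_k(t) + n_k,   n_k ~ N(0, sigma_k^2)
    # e.g. the airspeed estimate carries 5 m/s (1-sigma) white noise while the
    # true JSBSim state remains untouched.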
def main(self):
"""main method of the simulator"""
self.init_sim()
# run simulation
bar = pyprind.ProgBar(self.sim_end_time_s)
time_last_bar_update = 0
while self.sim_states["t"][-1] < self.sim_end_time_s:
self.sim_states["t"].append(self.step())
if self.sim_states["t"][-1] >= time_last_bar_update + 1: # throttle update of progress bar
bar.update()
time_last_bar_update = self.sim_states["t"][-1]
bar.update()
self.output_results()
if __name__ == "__main__":
"""run with python2 simulator.py"""
parser = argparse.ArgumentParser(
description='simulates aircraft control with px4/mtecs')
parser.add_argument('--test', dest='test', action='store_true')
parser.add_argument(
'--jsbsim_root',
dest='jsbsim_root',
default=os.path.dirname(os.path.realpath(sys.argv[0])) + '/../external/')
parser.add_argument('-o', dest='filename_out', default='report.html')
args = parser.parse_args()
s = Simulator(vars(args))
if args.test:
s.test()
else:
s.main()
| mit |
ilkerc/noduledetector | objectclassifier/plot_roc_forCT.py | 1 | 6679 | import h5py
from FeatureSet import FeatureSet
from accurancyTools import *
from sklearn import ensemble
import itertools
import matplotlib.pyplot as plt
trainingDataSetNames = ['Volume', 'CentroidNorm', 'Centroid', 'Perimeter', 'PseudoRadius', 'Complexity',
'BoundingBox2Volume', 'BoundingBoxAspectRatio', 'IntensityMax', 'IntensityMean',
'IntensityMin', 'IntensityStd', 'CloseMassRatio', 'IntensityHist', 'gaussianCoefficients',
'gaussianGOV', 'Gradient', 'GradientOfMag']
senaryo1File = '../../noduledetectordata/ilastikoutput3/s1/s1.h5'
senaryo1LabelFile = '../../noduledetectordata/ilastikoutput3/s1/s1_labels.h5'
senaryo1BatchFile = '../../noduledetectordata/ilastikoutput3/s1/example_05.h5'
senaryo1BatchLabelFile = '../../noduledetectordata/ilastikoutput3/s1/labels_example_05.h5'
senaryo1 = {'all_features': senaryo1File, 'all_features_labels': senaryo1LabelFile, 'batch_features': senaryo1BatchFile, 'batch_features_labels': senaryo1BatchLabelFile}
senaryo1_train_sets = FeatureSet.readFromFile(senaryo1File, trainingDataSetNames, senaryo1LabelFile, 'labels')
senaryo1_batch_sets = FeatureSet.readFromFile(senaryo1BatchFile, trainingDataSetNames, senaryo1BatchLabelFile, 'labels')
senaryo1_sets = {'all_features_set': senaryo1_train_sets, 'batch_features_set': senaryo1_batch_sets, 'fileName': 'example05'}
senaryo2File = '../../noduledetectordata/ilastikoutput3/s2/s2.h5'
senaryo2LabelFile = '../../noduledetectordata/ilastikoutput3/s2/s2_labels.h5'
senaryo2BatchFile = '../../noduledetectordata/ilastikoutput3/s2/example_01.h5'
senaryo2BatchLabelFile = '../../noduledetectordata/ilastikoutput3/s2/labels_example_01.h5'
senaryo2 = {'all_features': senaryo2File, 'all_features_labels': senaryo2LabelFile, 'batch_features': senaryo2BatchFile, 'batch_features_labels': senaryo2BatchLabelFile}
senaryo2_train_sets = FeatureSet.readFromFile(senaryo2File, trainingDataSetNames, senaryo2LabelFile, 'labels')
senaryo2_batch_sets = FeatureSet.readFromFile(senaryo2BatchFile, trainingDataSetNames, senaryo2BatchLabelFile, 'labels')
senaryo2_sets = {'all_features_set': senaryo2_train_sets, 'batch_features_set': senaryo2_batch_sets, 'fileName': 'example01'}
senaryo3File = '../../noduledetectordata/ilastikoutput3/s3/s3.h5'
senaryo3LabelFile = '../../noduledetectordata/ilastikoutput3/s3/s3_labels.h5'
senaryo3BatchFile = '../../noduledetectordata/ilastikoutput3/s3/example_03.h5'
senaryo3BatchLabelFile = '../../noduledetectordata/ilastikoutput3/s3/labels_example_03.h5'
senaryo3 = {'all_features': senaryo3File, 'all_features_labels': senaryo3LabelFile, 'batch_features': senaryo3BatchFile, 'batch_features_labels': senaryo3BatchLabelFile}
senaryo3_train_sets = FeatureSet.readFromFile(senaryo3File, trainingDataSetNames, senaryo3LabelFile, 'labels')
senaryo3_batch_sets = FeatureSet.readFromFile(senaryo3BatchFile, trainingDataSetNames, senaryo3BatchLabelFile, 'labels')
senaryo3_sets = {'all_features_set': senaryo3_train_sets, 'batch_features_set': senaryo3_batch_sets, 'fileName': 'example03'}
senaryo4File = '../../noduledetectordata/ilastikoutput3/s4/s4.h5'
senaryo4LabelFile = '../../noduledetectordata/ilastikoutput3/s4/s4_labels.h5'
senaryo4BatchFile = '../../noduledetectordata/ilastikoutput3/s4/example_02.h5'
senaryo4BatchLabelFile = '../../noduledetectordata/ilastikoutput3/s4/labels_example_02.h5'
senaryo4 = {'all_features': senaryo4File, 'all_features_labels': senaryo4LabelFile, 'batch_features': senaryo4BatchFile, 'batch_features_labels': senaryo4BatchLabelFile}
senaryo4_train_sets = FeatureSet.readFromFile(senaryo4File, trainingDataSetNames, senaryo4LabelFile, 'labels')
senaryo4_batch_sets = FeatureSet.readFromFile(senaryo4BatchFile, trainingDataSetNames, senaryo4BatchLabelFile, 'labels')
senaryo4_sets = {'all_features_set': senaryo4_train_sets, 'batch_features_set': senaryo4_batch_sets, 'fileName': 'example02'}
all_senaryo_files = [senaryo1, senaryo2, senaryo3, senaryo4]
all_senaryo_sets = [senaryo1_sets, senaryo2_sets, senaryo3_sets, senaryo4_sets]
alphaStep = 30
alphaStart = 0.3
alphaStop = 0.8
treeCount = 10
mult = 15.0
alphaRange = numpy.linspace(alphaStart, alphaStop, alphaStep)
random_seed = 100
numpy.random.seed(random_seed)
roc_iterator = 0
TPMAT = numpy.zeros((all_senaryo_sets.__len__(), alphaStep))
FPMAT = numpy.zeros((all_senaryo_sets.__len__(), alphaStep))
s = 0
a = 0
f = h5py.File('../randomforest_results_wg.h5', 'w')
allpositives = 0
for senaryo in all_senaryo_sets:
allpositives += sum(senaryo['batch_features_set'].labels==1)
avg_tp_rate = 0.0
avg_fp_number = 0
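    # Build the classifier from repeated balanced resamples: each pass below
    # fits a fresh forest on a re-balanced training set and merges its trees
    # into rf.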
rf = ensemble.RandomForestClassifier(n_estimators=treeCount, random_state=(roc_iterator+1)*10)
sde = senaryo['all_features_set'].balanceOnLabel(multiplier=mult)
sample_weight = numpy.array([1/mult if i == 0 else 1.0 for i in sde.labels])
rf.fit(sde.data, numpy.ravel(sde.labels), sample_weight)
for x in range(0, treeCount):
rf_ext = ensemble.RandomForestClassifier(n_estimators=treeCount, random_state=(roc_iterator+1)*10)
sde = senaryo['all_features_set'].balanceOnLabel(multiplier=mult)
sample_weight = numpy.array([1/mult if i == 0 else 1.0 for i in sde.labels])
rf_ext.fit(sde.data, numpy.ravel(sde.labels), sample_weight)
rf.estimators_.extend(rf_ext.estimators_)
rf.n_estimators += rf_ext.n_estimators
p = rf.predict_proba(senaryo['batch_features_set'].data)
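    # Sweep the class-probability bias alpha to collect the TP/FP counts that
    # form the ROC curve written to 'roc_vals' below.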
for alpha in alphaRange:
predBias = numpy.array([1-alpha, alpha])
set_test_results = calculateAccuracyN(p, senaryo['batch_features_set'].labels, bias=predBias, verbose=True)
TPMAT[s][a] = set_test_results['tpnumber']
#TPMAT[s][a] = set_test_results['tprate']
FPMAT[s][a] = set_test_results['fpnumber']
a += 1
dataset = f.create_dataset(senaryo['fileName'], numpy.shape(p), dtype='float')
dataset[:] = p
roc_iterator += 1
s += 1
a = 0
avg_tp_rate = numpy.sum(TPMAT, axis=0) / allpositives
avg_fp_number = numpy.sum(FPMAT, axis=0) / len(all_senaryo_sets)
merge_fp_tp = numpy.r_[avg_fp_number[None, :], avg_tp_rate[None, :]]
merge_fp_tp = merge_fp_tp.transpose()
dset = f.create_dataset('roc_vals', shape=numpy.shape(merge_fp_tp), dtype='float')
dset[:] = merge_fp_tp
dset.attrs.create('var_mult', mult)
dset.attrs.create('var_treecount', treeCount)
dset.attrs.create('var_alphastep', alphaStep)
dset.attrs.create('var_alphastart', alphaStart)
dset.attrs.create('var_alphastop', alphaStop)
f.close()
print merge_fp_tp
print 'Mult. : ', mult
print 'TreeCount :', treeCount
for x in range(0, 29):
if avg_fp_number[x] < 20:
print 'Alpha : ', alphaRange[x], '& TP : ', avg_tp_rate[x], '& FP : &', avg_fp_number[x] | mit |
94KeyboardsSmashed/SLI2017_Hurgus_Madison | Python_Data_Analysis/Accelerometer_Data_XYZ_Graph.py | 1 | 2043 | # -*- coding: utf-8 -*-
"""
Created on Tue May 9 12:13:27 2017
@author: Hyun-seok
"""
import numpy as np
import math
import matplotlib.pyplot as plt
def mag(x):
#Calculates Magnitude (sqrt(x^2+y^2+z^2))
#Takes list of tuples (x, y, z)
return math.sqrt(sum(float(i)**2 for i in x))
def linesplit(number):
    #Parses log data into an iterable list of values
#Takes an integer between 0 and 3. 0 is time, 1 is x, 2 is y, 3 is z.
#Be sure to sanitize the created list to make it readable for the program
return [line.split(",")[number] for line in lines]
def sanitize(lst):
    #Gets rid of unnecessary spaces from the log file.
    #Use after linesplit to make lists readable by python
return [s.rstrip() for s in lst]
#Have log.txt in the same folder as this code.
#Name of log has to be same as name in the open function
with open('log.txt') as log:
lines = log.readlines()
rawtime = linesplit(0)
rawX = linesplit(1)
rawY = linesplit(2)
rawZ = linesplit(3)
time = sanitize(rawtime)
x = sanitize(rawX)
y = sanitize(rawY)
z = sanitize(rawZ)
#consult matplotlib libraries, especially pyplot.
plt.figure()
#subplot 1. X axis read-outs on a regular graph. Green
plt.subplot(3, 1, 1)
plt.plot(time, x, 'g')
plt.axhline(y=0, color='b', linestyle='-') #just a line on y=0 for reference
plt.ylabel("acceleration m/s**2")
plt.xlabel("time (s)")
#subplot 2. Y axis read-outs on a regular graph. Red
plt.subplot(3, 1, 2)
plt.plot(time, y, 'r')
plt.axhline(y=0, color='b', linestyle='-') #just a line on y=0 for reference
plt.ylabel("acceleration m/s**2")
plt.xlabel("time (s)")
#subplot 3. Z axis read-outs on a regular graph. Blue
plt.subplot(3, 1, 3)
plt.plot(time, z)
plt.axhline(y=0, color='b', linestyle='-')
plt.ylabel("acceleration m/s**2")
plt.xlabel("time (s)")
plt.show()
| mit |
saquiba2/numpytry | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
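For instance, a small comma-separated file could be loaded into a structured
array with genfromtxt (the filename here is only illustrative): ::
 >>> data = np.genfromtxt('measurements.csv', delimiter=',', names=True)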
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
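For the simple-format case, an array can be round-tripped with tofile() and
fromfile(); note that the dtype and shape are not stored in the file and must
be supplied again when reading (the filename is only illustrative): ::
 >>> a = np.arange(6, dtype=np.float64)
 >>> a.tofile('raw_data.bin')
 >>> b = np.fromfile('raw_data.bin', dtype=np.float64)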
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
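For example (values from the random generator differ from run to run, so only
the diagonal example shows its output): ::
 >>> x = np.random.random((2, 2))
 >>> np.diag([1, 2, 3])
 array([[1, 0, 0],
        [0, 2, 0],
        [0, 0, 3]])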
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
dimkal/mne-python | mne/viz/utils.py | 6 | 30185 | """Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import math
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose, set_config
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
'#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
def _setup_vmin_vmax(data, vmin, vmax, norm=False):
"""Aux function to handle vmin and vmax parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
if norm:
vmin = 0.
else:
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
if norm:
vmin = 0.
else:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
return vmin, vmax
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
""" Adjust subplot parameters to give specified padding.
Note. For plotting please use this function instead of plt.tight_layout
Parameters
----------
pad : float
padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad : float
Padding height between edges of adjacent subplots.
Defaults to `pad_inches`.
w_pad : float
Padding width between edges of adjacent subplots.
Defaults to `pad_inches`.
fig : instance of Figure
Figure to apply changes to.
"""
import matplotlib.pyplot as plt
fig = plt.gcf() if fig is None else fig
fig.canvas.draw()
try: # see https://github.com/matplotlib/matplotlib/issues/2654
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
except Exception:
warn('Matplotlib function \'tight_layout\' is not supported.'
             ' Skipping subplot adjustment.')
else:
try:
fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
except Exception:
pass
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze
Parameters
----------
limits : list (or array) of length 3 or 6
Bounds for the colormap, which will be mirrored across zero if length
3, or completely specified (and potentially asymmetric) if length 6.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of matplotlib.pyplot.colormap | array
A teal->blue->gray->red->yellow colormap.
Notes
-----
For this will return a colormap that will display correctly for data
that are scaled by the plotting function to span [-fmax, fmax].
Examples
--------
The following code will plot a STC using standard MNE limits:
colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
"""
# Ensure limits is an array
limits = np.asarray(limits, dtype='float')
if len(limits) != 3 and len(limits) != 6:
raise ValueError('limits must have 3 or 6 elements')
if len(limits) == 3 and any(limits < 0.):
raise ValueError('if 3 elements, limits must all be non-negative')
if any(np.diff(limits) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
if len(limits) == 3:
limits = (np.concatenate((-np.flipud(limits), limits)) +
limits[-1]) / (2 * limits[-1])
else:
limits = (limits - np.min(limits)) / np.max(limits -
np.min(limits))
cdict = {'red': ((limits[0], 0.0, 0.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
'green': ((limits[0], 1.0, 1.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 1.0, 1.0)),
'blue': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 0.0, 0.0))}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
if len(limits) == 3:
limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
limits[-1]
else:
limits = np.concatenate((limits[:3], [0], limits[3:]))
limits /= np.max(np.abs(limits))
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, limits, 255 * c)
for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
if len(params['projs']) > 0:
if params['fig_proj'] is None:
_draw_proj_checkbox(event, params, draw_current_state=False)
else:
# turn off options dialog
plt.close(params['fig_proj'])
del params['proj_checks']
params['fig_proj'] = None
def _toggle_proj(event, params):
"""Operation to perform when proj boxes clicked"""
# read options if possible
if 'proj_checks' in params:
bools = [x[0].get_visible() for x in params['proj_checks'].lines]
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
bools = [True] * len(params['projs'])
compute_proj = False
if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _get_help_text(params):
"""Aux function for customizing help dialogs text."""
text, text2 = list(), list()
text.append(u'\u2190 : \n')
text.append(u'\u2192 : \n')
text.append(u'\u2193 : \n')
text.append(u'\u2191 : \n')
text.append(u'- : \n')
text.append(u'+ or = : \n')
text.append(u'Home : \n')
text.append(u'End : \n')
text.append(u'Page down : \n')
text.append(u'Page up : \n')
text.append(u'F11 : \n')
text.append(u'? : \n')
text.append(u'Esc : \n\n')
text.append(u'Mouse controls\n')
text.append(u'click on data :\n')
text2.append('Navigate left\n')
text2.append('Navigate right\n')
text2.append('Scale down\n')
text2.append('Scale up\n')
text2.append('Toggle full screen mode\n')
text2.append('Open help box\n')
text2.append('Quit\n\n\n')
if 'raw' in params:
text2.insert(4, 'Reduce the time shown per view\n')
text2.insert(5, 'Increase the time shown per view\n')
text.append(u'click elsewhere in the plot :\n')
if 'ica' in params:
text.append(u'click component name :\n')
text2.insert(2, 'Navigate components down\n')
text2.insert(3, 'Navigate components up\n')
text2.insert(8, 'Reduce the number of components per view\n')
text2.insert(9, 'Increase the number of components per view\n')
text2.append('Mark bad channel\n')
text2.append('Vertical line at a time instant\n')
text2.append('Show topography for the component\n')
else:
text.append(u'click channel name :\n')
text2.insert(2, 'Navigate channels down\n')
text2.insert(3, 'Navigate channels up\n')
text2.insert(8, 'Reduce the number of channels per view\n')
text2.insert(9, 'Increase the number of channels per view\n')
text2.append('Mark bad channel\n')
text2.append('Vertical line at a time instant\n')
text2.append('Mark bad channel\n')
elif 'epochs' in params:
text.append(u'right click :\n')
text2.insert(4, 'Reduce the number of epochs per view\n')
text2.insert(5, 'Increase the number of epochs per view\n')
if 'ica' in params:
text.append(u'click component name :\n')
text2.insert(2, 'Navigate components down\n')
text2.insert(3, 'Navigate components up\n')
text2.insert(8, 'Reduce the number of components per view\n')
text2.insert(9, 'Increase the number of components per view\n')
text2.append('Mark component for exclusion\n')
text2.append('Vertical line at a time instant\n')
text2.append('Show topography for the component\n')
else:
text.append(u'click channel name :\n')
text.append(u'right click channel name :\n')
text2.insert(2, 'Navigate channels down\n')
text2.insert(3, 'Navigate channels up\n')
text2.insert(8, 'Reduce the number of channels per view\n')
text2.insert(9, 'Increase the number of channels per view\n')
text.insert(10, u'b : \n')
text2.insert(10, 'Toggle butterfly plot on/off\n')
text.insert(11, u'h : \n')
text2.insert(11, 'Show histogram of peak-to-peak values\n')
text2.append('Mark bad epoch\n')
text2.append('Vertical line at a time instant\n')
text2.append('Mark bad channel\n')
text2.append('Plot ERP/ERF image\n')
text.append(u'middle click :\n')
text2.append('Show channel name (butterfly plot)\n')
text.insert(11, u'o : \n')
text2.insert(11, 'View settings (orig. view only)\n')
return ''.join(text), ''.join(text2)
def _prepare_trellis(n_cells, max_col):
"""Aux function
"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
ax.set_visible(False)
return fig, axes
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog"""
from matplotlib import widgets
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
[True] * len(params['projs']))
width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
height = len(projs) / 6.0 + 0.5
fig_proj = figure_nobar(figsize=(width, height))
fig_proj.canvas.set_window_title('SSP projection vectors')
params['fig_proj'] = fig_proj # necessary for proper toggling
ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)
proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active'] is True:
for x in proj_checks.lines[ii]:
x.set_color('r')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
# this should work for non-test cases
try:
fig_proj.canvas.draw()
fig_proj.show()
except Exception:
pass
def _layout_figure(params):
"""Function for setting figure layout. Shared with raw and epoch plots"""
size = params['fig'].get_size_inches() * params['fig'].dpi
scroll_width = 25
hscroll_dist = 25
vscroll_dist = 10
l_border = 100
r_border = 10
t_border = 35
b_border = 40
# only bother trying to reset layout if it's reasonable to do so
if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
return
# convert to relative units
scroll_width_x = scroll_width / size[0]
scroll_width_y = scroll_width / size[1]
vscroll_dist /= size[0]
hscroll_dist /= size[1]
l_border /= size[0]
r_border /= size[0]
t_border /= size[1]
b_border /= size[1]
# main axis (traces)
ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
ax_y = hscroll_dist + scroll_width_y + b_border
ax_height = 1.0 - ax_y - t_border
pos = [l_border, ax_y, ax_width, ax_height]
params['ax'].set_position(pos)
if 'ax2' in params:
params['ax2'].set_position(pos)
params['ax'].set_position(pos)
# vscroll (channels)
pos = [ax_width + l_border + vscroll_dist, ax_y,
scroll_width_x, ax_height]
params['ax_vscroll'].set_position(pos)
# hscroll (time)
pos = [l_border, b_border, ax_width, scroll_width_y]
params['ax_hscroll'].set_position(pos)
if 'ax_button' in params:
# options button
pos = [l_border + ax_width + vscroll_dist, b_border,
scroll_width_x, scroll_width_y]
params['ax_button'].set_position(pos)
if 'ax_help_button' in params:
pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
scroll_width_x * 2, scroll_width_y]
params['ax_help_button'].set_position(pos)
params['fig'].canvas.draw()
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
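    Examples
    --------
    A minimal sketch; both filenames below are hypothetical:
    fname_html = compare_fiff('sample_a-raw.fif', 'sample_b-raw.fif', show=False)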
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'w')
else:
f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
fname_out = f.name
with f as fid:
fid.write(diff)
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar"""
from matplotlib import rcParams, pyplot as plt
old_val = rcParams['toolbar']
try:
rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
except Exception as ex:
raise ex
finally:
rcParams['toolbar'] = old_val
return fig
def _helper_raw_resize(event, params):
"""Helper for resizing"""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size)
_layout_figure(params)
def _plot_raw_onscroll(event, params, len_channels=None):
"""Interpret scroll events"""
if len_channels is None:
len_channels = len(params['info']['ch_names'])
orig_start = params['ch_start']
if event.step < 0:
params['ch_start'] = min(params['ch_start'] + params['n_channels'],
len_channels - params['n_channels'])
    else:  # event.step > 0: scrolled up
params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0)
if orig_start != params['ch_start']:
_channels_changed(params, len_channels)
def _channels_changed(params, len_channels):
"""Helper function for dealing with the vertical shift of the viewport."""
if params['ch_start'] + params['n_channels'] > len_channels:
params['ch_start'] = len_channels - params['n_channels']
if params['ch_start'] < 0:
params['ch_start'] = 0
params['plot_fun']()
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
def _plot_raw_onkey(event, params):
"""Interpret key presses"""
import matplotlib.pyplot as plt
if event.key == 'escape':
plt.close(params['fig'])
elif event.key == 'down':
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'up':
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'right':
value = params['t_start'] + params['duration']
_plot_raw_time(value, params)
params['update_fun']()
params['plot_fun']()
elif event.key == 'left':
value = params['t_start'] - params['duration']
_plot_raw_time(value, params)
params['update_fun']()
params['plot_fun']()
elif event.key in ['+', '=']:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == '-':
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key == 'pageup':
n_channels = params['n_channels'] + 1
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'pagedown':
n_channels = params['n_channels'] - 1
if n_channels == 0:
return
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
if len(params['lines']) > n_channels: # remove line from view
params['lines'][n_channels].set_xdata([])
params['lines'][n_channels].set_ydata([])
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'home':
duration = params['duration'] - 1.0
if duration <= 0:
return
params['duration'] = duration
params['hsel_patch'].set_width(params['duration'])
params['update_fun']()
params['plot_fun']()
elif event.key == 'end':
duration = params['duration'] + 1.0
if duration > params['raw'].times[-1]:
duration = params['raw'].times[-1]
params['duration'] = duration
params['hsel_patch'].set_width(params['duration'])
params['update_fun']()
params['plot_fun']()
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
def _mouse_click(event, params):
"""Vertical select callback"""
if event.button != 1:
return
if event.inaxes is None:
if params['n_channels'] > 100:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
return
params['label_click_fun'](pos)
# vertical scrollbar changed
if event.inaxes == params['ax_vscroll']:
ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scrollbar changed
elif event.inaxes == params['ax_hscroll']:
_plot_raw_time(event.xdata - params['duration'] / 2, params)
params['update_fun']()
params['plot_fun']()
elif event.inaxes == params['ax']:
params['pick_bads_fun'](event)
def _select_bads(event, params, bads):
"""Helper for selecting bad channels onpick. Returns updated bads list."""
# trade-off, avoid selecting more than one channel when drifts are present
# however for clean data don't click on peaks but on flat segments
def f(x, y):
return y(np.mean(x), x.std() * 2)
lines = event.inaxes.lines
for line in lines:
ydata = line.get_ydata()
if not isinstance(ydata, list) and not np.isnan(ydata).any():
ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
if ymin <= event.ydata <= ymax:
this_chan = vars(line)['ch_name']
if this_chan in params['info']['ch_names']:
ch_idx = params['ch_start'] + lines.index(line)
if this_chan not in bads:
bads.append(this_chan)
color = params['bad_color']
line.set_zorder(-1)
else:
while this_chan in bads:
bads.remove(this_chan)
color = vars(line)['def_color']
line.set_zorder(0)
line.set_color(color)
params['ax_vscroll'].patches[ch_idx].set_color(color)
break
else:
x = np.array([event.xdata] * 2)
params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
params['vertline_t'].set_text('%0.3f' % x[0])
return bads
def _onclick_help(event, params):
"""Function for drawing help window"""
import matplotlib.pyplot as plt
text, text2 = _get_help_text(params)
width = 6
height = 5
fig_help = figure_nobar(figsize=(width, height), dpi=80)
fig_help.canvas.set_window_title('Help')
ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
ax.set_title('Keyboard shortcuts')
plt.axis('off')
ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
ax1.set_yticklabels(list())
plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
ha='right')
plt.axis('off')
ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
ax2.set_yticklabels(list())
plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
plt.axis('off')
tight_layout(fig=fig_help)
# this should work for non-test cases
try:
fig_help.canvas.draw()
fig_help.show()
except Exception:
pass
class ClickableImage(object):
"""
Display an image so you can click on it and store x/y positions.
Takes as input an image array (can be any array that works with imshow,
    but will work best with images). Displays the image and lets you
click on it. Stores the xy coordinates of each click, so now you can
superimpose something on top of it.
Upon clicking, the x/y coordinate of the cursor will be stored in
self.coords, which is a list of (x, y) tuples.
Parameters
----------
imdata: ndarray
The image that you wish to click on for 2-d points.
**kwargs : dict
Keyword arguments. Passed to ax.imshow.
Notes
-----
.. versionadded:: 0.9.0
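    Examples
    --------
    A minimal sketch with a synthetic image; clicking happens interactively,
    so this only illustrates the intended call sequence:
    im = np.random.RandomState(0).rand(100, 100)
    click = ClickableImage(im)  # click a few points, then close the figure
    layout = click.to_layout()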
"""
def __init__(self, imdata, **kwargs):
"""Display the image for clicking."""
from matplotlib.pyplot import figure, show
self.coords = []
self.imdata = imdata
self.fig = figure()
self.ax = self.fig.add_subplot(111)
self.ymax = self.imdata.shape[0]
self.xmax = self.imdata.shape[1]
self.im = self.ax.imshow(imdata, aspect='auto',
extent=(0, self.xmax, 0, self.ymax),
picker=True, **kwargs)
self.ax.axis('off')
self.fig.canvas.mpl_connect('pick_event', self.onclick)
show()
def onclick(self, event):
"""Mouse click handler.
Parameters
----------
event: matplotlib event object
The matplotlib object that we use to get x/y position.
"""
mouseevent = event.mouseevent
self.coords.append((mouseevent.xdata, mouseevent.ydata))
def plot_clicks(self, **kwargs):
"""Plot the x/y positions stored in self.coords.
Parameters
----------
**kwargs : dict
Arguments are passed to imshow in displaying the bg image.
"""
from matplotlib.pyplot import subplots, show
f, ax = subplots()
ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
xcoords, ycoords = zip(*self.coords)
ax.scatter(xcoords, ycoords, c='r')
ann_text = np.arange(len(self.coords)).astype(str)
for txt, coord in zip(ann_text, self.coords):
ax.annotate(txt, coord, fontsize=20, color='r')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
show()
def to_layout(self, **kwargs):
"""Turn coordinates into an MNE Layout object.
Normalizes by the image you used to generate clicks
Parameters
----------
**kwargs : dict
Arguments are passed to generate_2d_layout
"""
from mne.channels.layout import generate_2d_layout
coords = np.array(self.coords)
lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
return lt
def _fake_click(fig, ax, point, xform='ax', button=1):
"""Helper to fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
raise ValueError('unknown transform')
try:
fig.canvas.button_press_event(x, y, button, False, None)
except Exception: # for old MPL
fig.canvas.button_press_event(x, y, button, False)
def add_background_image(fig, im, set_ratios=None):
"""Add a background image to a plot.
Adds the image specified in `im` to the
figure `fig`. This is generally meant to
be done with topo plots, though it could work
for any plot.
Note: This modifies the figure and/or axes
in place.
Parameters
----------
fig: plt.figure
The figure you wish to add a bg image to.
im: ndarray
A numpy array that works with a call to
plt.imshow(im). This will be plotted
as the background of the figure.
set_ratios: None | str
Set the aspect ratio of any axes in fig
to the value in set_ratios. Defaults to None,
which does nothing to axes.
Returns
-------
    ax_im : instance of the created matplotlib axis object
        The axis corresponding to the image you added.
Notes
-----
.. versionadded:: 0.9.0
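    Examples
    --------
    A minimal sketch with a random image standing in for real topography data:
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = np.random.RandomState(0).rand(20, 20)
    ax_im = add_background_image(fig, im)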
"""
if set_ratios is not None:
for ax in fig.axes:
ax.set_aspect(set_ratios)
ax_im = fig.add_axes([0, 0, 1, 1])
ax_im.imshow(im, aspect='auto')
ax_im.set_zorder(-1)
return ax_im
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/numpy/lib/recfunctions.py | 10 | 39618 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
Similar to dtype.descr, but the second item of each tuple is a dtype, not a
string. As a result, this handles subarray dtypes
Can be passed to the dtype constructor to reconstruct the dtype, noting that
this (deliberately) discards field offsets.
Examples
--------
>>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)])
>>> dt.descr
[(('a', 'A'), '<i4'), ('b', '<f8', (3,))]
>>> get_fieldspec(dt)
[(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))]
"""
if dtype.names is None:
# .descr returns a nameless field, so we should too
return [('', dtype)]
else:
fields = ((name, dtype.fields[name]) for name in dtype.names)
# keep any titles, if present
return [
(name if len(f) == 2 else (f[2], name), f[0])
for name, f in fields
]
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return (('', ndtype),)
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
if current.names and len(current.names) <= 1:
# special case - dtypes of 0 or 1 field are flattened
newdtype.extend(get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
return zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
        Whether to collapse nested fields.
"""
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
if sys.version_info[0] >= 3:
zip_longest = itertools.zip_longest
else:
zip_longest = itertools.izip_longest
for tup in zip_longest(*seqarrays, fillvalue=fill_value):
yield tuple(zipfunc(tup))
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
    depending on its corresponding type:
* ``-1`` for integers
* ``-1.0`` for floating point numbers
* ``'-'`` for characters
* ``'-1'`` for strings
* ``True`` for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
# Make sure we have named fields
if not seqdtype.names:
seqdtype = np.dtype([('', seqdtype)])
if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
"""
Return a new array keeping only the fields in `keep_names`,
and preserving the order of those fields.
Parameters
----------
base : array
Input array
keep_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to keep. Order of the names will be preserved.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
"""
newdtype = [(n, base.dtype[n]) for n in keep_names]
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
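    Examples
    --------
    A minimal sketch (output reprs are omitted because the exact integer
    width shown in the dtype depends on the platform):
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    >>> b = rfn.append_fields(a, 'C', data=[1.5, 2.5], usemask=False)
    >>> b.dtype.names
    ('A', 'B', 'C')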
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(
max(len(base), len(data)),
dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
The memory layout of structured datatypes allows fields at arbitrary
byte offsets. This means the fields can be separated by padding bytes,
their offsets can be non-monotonically increasing, and they can overlap.
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
on the `align` option, which behaves like the `align` option to `np.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
bytes are removed.
    If `align=True`, this method produces an "aligned" memory layout in which
each field's offset is a multiple of its alignment, and the total itemsize
is a multiple of the largest alignment, by adding padding bytes as needed.
Parameters
----------
a : ndarray or dtype
array or dtype for which to repack the fields.
align : boolean
If true, use an "aligned" memory layout, otherwise use a "packed" layout.
recurse : boolean
If True, also repack nested structures.
Returns
-------
repacked : ndarray or dtype
Copy of `a` with fields repacked, or `a` itself if no repacking was
needed.
Examples
--------
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
>>> dt = np.dtype('u1,i4,f4', align=True)
>>> dt
dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True)
>>> print_offsets(dt)
offsets: [0, 4, 8]
itemsize: 16
>>> packed_dt = repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
offsets: [0, 1, 5]
itemsize: 13
"""
if not isinstance(a, np.dtype):
dt = repack_fields(a.dtype, align=align, recurse=recurse)
return a.astype(dt, copy=False)
if a.names is None:
return a
fieldinfo = []
for name in a.names:
tup = a.fields[name]
if recurse:
fmt = repack_fields(tup[0], align=align, recurse=True)
else:
fmt = tup[0]
if len(tup) == 3:
name = (tup[2], name)
fieldinfo.append((name, fmt))
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt))
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
for fname, fdtype in get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
else:
nameidx = names.index(fname)
_, cdtype = newdescr[nameidx]
if autoconvert:
newdescr[nameidx] = (fname, max(fdtype, cdtype))
elif fdtype != cdtype:
raise TypeError("Incompatible type '%s' <> '%s'" %
(cdtype, fdtype))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
    to the fields used to join the arrays. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
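    Examples
    --------
    An illustrative sketch only (the arrays, field names and key used here
    are arbitrary):
    >>> import numpy as np
    >>> from numpy.lib import recfunctions as rfn
    >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('x', float)])
    >>> r2 = np.array([(1, 100.), (3, 300.)], dtype=[('key', int), ('y', float)])
    >>> joined = rfn.join_by('key', r1, r2, jointype='inner')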
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
if len(set(key)) != len(key):
dup = next(x for n,x in enumerate(key) if x in key[n+1:])
raise ValueError("duplicate join key %r" % dup)
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %r' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %r' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
collisions = (set(r1names) & set(r2names)) - set(key)
if collisions and not (r1postfix or r2postfix):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't both be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
# (use order of keys in `r1` for back-compatibility)
key1 = [ n for n in r1names if n in key ]
r1k = _keep_fields(r1, key1)
r2k = _keep_fields(r2, key1)
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = get_fieldspec(r1k.dtype)
# Add the fields from r1
for fname, fdtype in get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
for fname, fdtype in get_fieldspec(r2.dtype):
# Have we seen the current name already ?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
try:
nameidx = names.index(fname)
except ValueError:
#... we haven't: just add the description to the current list
ndtype.append((fname, fdtype))
else:
# collision
_, cdtype = ndtype[nameidx]
if fname in key:
# The current field is part of the key: take the largest dtype
ndtype[nameidx] = (fname, max(fdtype, cdtype))
else:
# The current field is not part of the key: add the suffixes,
# and place the new field adjacent to the old one
ndtype[nameidx:nameidx + 1] = [
(fname + r1postfix, cdtype),
(fname + r2postfix, fdtype)
]
# Rebuild a dtype from the new fields
ndtype = np.dtype(ndtype)
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| mit |
tomasreimers/tensorflow-emscripten | tensorflow/examples/learn/iris_custom_model.py | 11 | 2621 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
guziy/basemap | doc/users/figures/plotetopo5.py | 6 | 2336 | from mpl_toolkits.basemap import Basemap, shiftgrid, cm
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
# read in etopo5 topography/bathymetry.
url = 'http://ferret.pmel.noaa.gov/thredds/dodsC/data/PMEL/etopo5.nc'
etopodata = Dataset(url)
topoin = etopodata.variables['ROSE'][:]
lons = etopodata.variables['ETOPO05_X'][:]
lats = etopodata.variables['ETOPO05_Y'][:]
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons = shiftgrid(180.,topoin,lons,start=False)
# plot topography/bathymetry as an image.
# create the figure and axes instances.
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# setup of basemap ('lcc' = lambert conformal conic).
# use major and minor sphere radii from WGS84 ellipsoid.
m = Basemap(llcrnrlon=-145.5,llcrnrlat=1.,urcrnrlon=-2.566,urcrnrlat=46.352,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',area_thresh=1000.,projection='lcc',\
lat_1=50.,lon_0=-107.,ax=ax)
# transform to nx x ny regularly spaced 5km native projection grid
nx = int((m.xmax-m.xmin)/5000.)+1; ny = int((m.ymax-m.ymin)/5000.)+1
topodat = m.transform_scalar(topoin,lons,lats,nx,ny)
# plot image over map with imshow.
im = m.imshow(topodat,cm.GMT_haxby)
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawcountries()
m.drawstates()
# draw parallels and meridians.
# label on left and bottom of map.
parallels = np.arange(0.,80,20.)
m.drawparallels(parallels,labels=[1,0,0,1])
meridians = np.arange(10.,360.,30.)
m.drawmeridians(meridians,labels=[1,0,0,1])
# add colorbar
cb = m.colorbar(im,"right", size="5%", pad='2%')
ax.set_title('ETOPO5 Topography - Lambert Conformal Conic')
plt.show()
# make a shaded relief plot.
# create new figure, axes instance.
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# attach new axes image to existing Basemap instance.
m.ax = ax
# create light source object.
from matplotlib.colors import LightSource
ls = LightSource(azdeg = 90, altdeg = 20)
# convert data to rgb array including shading from light source.
# (must specify color map)
rgb = ls.shade(topodat, cm.GMT_haxby)
im = m.imshow(rgb)
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawcountries()
m.drawstates()
ax.set_title('Shaded ETOPO5 Topography - Lambert Conformal Conic')
plt.show()
| gpl-2.0 |
ucsd-progsys/nate | learning/svm.py | 2 | 1569 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import math
import util
def build_model(features, labels, learn_rate=0.1, model_dir=None):
'''Build a linear classifier.
@param features: A list of feature names.
@param labels: A list of label names.
@param learn_rate: The training rate, defaults to 0.1.
@param model_dir: A directory to store the model summaries in.
@return: A 4-tuple of training, testing, plotting, and closing functions.
'''
## TODO: construct actual model
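    # A minimal sketch of the missing graph construction (the original file
    # leaves this as the TODO above). Everything below is an assumption made
    # only so that the names used by train/test/plot (x, y_, W, train_step,
    # accuracy, merged, sess, summary_writer) are defined; it is not the
    # authors' model, and the summary directory is a hypothetical default.
    x = tf.placeholder(tf.float32, [None, len(features)], name='x')
    y_ = tf.placeholder(tf.float32, [None, len(labels)], name='y_')
    W = tf.Variable(tf.zeros([len(features), len(labels)]))
    b = tf.Variable(tf.zeros([len(labels)]))
    logits = tf.matmul(x, W) + b
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
    train_step = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss)
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    merged = tf.summary.merge_all()
    sess = tf.Session()
    summary_writer = tf.summary.FileWriter(model_dir or '/tmp/svm_model',
                                           sess.graph)
    sess.run(tf.global_variables_initializer())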
def train(data, i, validation=None, verbose=False):
summary_str, _ = sess.run([merged, train_step],
feed_dict={x: data[features], y_: data[labels]})
summary_writer.add_summary(summary_str, i)
if validation is not None:
acc = sess.run(accuracy,
feed_dict={x: validation[features], y_: validation[labels]})
if verbose and i % 100 == 0:
print('accuracy at step {}: {}'.format(i, acc))
def test(data):
acc = sess.run(accuracy, {x: data[features], y_: data[labels]})
print('accuracy: %f' % acc)
def plot():
w = sess.run(tf.transpose(W))
plt.matshow(w, cmap='hot', interpolation='nearest')
plt.xticks(np.arange(len(features)), features, rotation=90)
plt.yticks(np.arange(len(labels)), labels)
# plt.legend()
plt.show()
def close():
sess.close()
tf.reset_default_graph()
return train, test, plot, close
| bsd-3-clause |
depet/scikit-learn | sklearn/covariance/graph_lasso_.py | 1 | 21244 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
###############################################################################
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
cost = -log_likelihood(mle, precision_)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
This results from the bound for the all the Lasso that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
###############################################################################
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
max_iter=100, verbose=False, return_costs=False,
eps=np.finfo(np.float).eps):
"""l1-penalized covariance estimator
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
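    Examples
    --------
    A minimal usage sketch (random data, purely illustrative):
    >>> import numpy as np
    >>> from sklearn.covariance import empirical_covariance, graph_lasso
    >>> X = np.random.RandomState(0).randn(60, 5)
    >>> emp_cov = empirical_covariance(X)
    >>> covariance, precision = graph_lasso(emp_cov, alpha=0.2)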
"""
_, n_features = emp_cov.shape
if alpha == 0:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
# ill-conditioned, and the CV loop blows. Beside, this takes
# conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, tol)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars')
coefs = coefs[:, -1]
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
            warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
return covariance_, precision_, costs
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Parameters
----------
alpha : positive float, optional
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
        printed at each iteration.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
See Also
--------
graph_lasso, GraphLassoCV
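    Examples
    --------
    A minimal usage sketch (random data, purely illustrative):
    >>> import numpy as np
    >>> from sklearn.covariance import GraphLasso
    >>> X = np.random.RandomState(0).randn(60, 5)
    >>> model = GraphLasso(alpha=0.1).fit(X)
    >>> estimated_cov = model.covariance_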
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, max_iter=100,
verbose=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
emp_cov = empirical_covariance(X)
self.covariance_, self.precision_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,)
return self
###############################################################################
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
`covariances_` : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
`precisions_` : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
`scores_` : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
Attributes
----------
`covariance_` : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
`precision_` : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
`alpha_`: float
Penalization parameter selected.
`cv_alphas_`: list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
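    Examples
    --------
    A minimal usage sketch (random data, purely illustrative; the default
    iteratively refined grid of alphas is used):
    >>> import numpy as np
    >>> from sklearn.covariance import GraphLassoCV
    >>> X = np.random.RandomState(0).randn(60, 5)
    >>> model = GraphLassoCV().fit(X)
    >>> chosen_alpha = model.alpha_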
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
max_iter=100, mode='cd', n_jobs=1, verbose=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
emp_cov = empirical_covariance(X)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=inner_verbose)
return self
| bsd-3-clause |
zorojean/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
kevin-intel/scikit-learn | examples/preprocessing/plot_discretization_classification.py | 22 | 7083 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
======================
Feature discretization
======================
A demonstration of feature discretization on synthetic classification datasets.
Feature discretization decomposes each feature into a set of bins, here equally
distributed in width. The discrete values are then one-hot encoded, and given
to a linear classifier. This preprocessing enables a non-linear behavior even
though the classifier is linear.
On this example, the first two rows represent linearly non-separable datasets
(moons and concentric circles) while the third is approximately linearly
separable. On the two linearly non-separable datasets, feature discretization
largely increases the performance of linear classifiers. On the linearly
separable dataset, feature discretization decreases the performance of linear
classifiers. Two non-linear classifiers are also shown for comparison.
This example should be taken with a grain of salt, as the intuition conveyed
does not necessarily carry over to real datasets. Particularly in
high-dimensional spaces, data can more easily be separated linearly. Moreover,
using feature discretization and one-hot encoding increases the number of
features, which easily leads to overfitting when the number of samples is small.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
# Code source: Tom Dupré la Tour
# Adapted from plot_classifier_comparison by Gaël Varoquaux and Andreas Müller
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
print(__doc__)
h = .02 # step size in the mesh
def get_name(estimator):
name = estimator.__class__.__name__
if name == 'Pipeline':
name = [get_name(est[1]) for est in estimator.steps]
name = ' + '.join(name)
return name
# list of (estimator, param_grid), where param_grid is used in GridSearchCV
classifiers = [
(LogisticRegression(random_state=0), {
'C': np.logspace(-2, 7, 10)
}),
(LinearSVC(random_state=0), {
'C': np.logspace(-2, 7, 10)
}),
(make_pipeline(
KBinsDiscretizer(encode='onehot'),
LogisticRegression(random_state=0)), {
'kbinsdiscretizer__n_bins': np.arange(2, 10),
'logisticregression__C': np.logspace(-2, 7, 10),
}),
(make_pipeline(
KBinsDiscretizer(encode='onehot'), LinearSVC(random_state=0)), {
'kbinsdiscretizer__n_bins': np.arange(2, 10),
'linearsvc__C': np.logspace(-2, 7, 10),
}),
(GradientBoostingClassifier(n_estimators=50, random_state=0), {
'learning_rate': np.logspace(-4, 0, 10)
}),
(SVC(random_state=0), {
'C': np.logspace(-2, 7, 10)
}),
]
names = [get_name(e) for e, g in classifiers]
n_samples = 100
datasets = [
make_moons(n_samples=n_samples, noise=0.2, random_state=0),
make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1),
make_classification(n_samples=n_samples, n_features=2, n_redundant=0,
n_informative=2, random_state=2,
n_clusters_per_class=1)
]
fig, axes = plt.subplots(nrows=len(datasets), ncols=len(classifiers) + 1,
figsize=(21, 9))
cm = plt.cm.PiYG
cm_bright = ListedColormap(['#b30065', '#178000'])
# iterate over datasets
for ds_cnt, (X, y) in enumerate(datasets):
print('\ndataset %d\n---------' % ds_cnt)
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=.5, random_state=42)
# create the grid for background colors
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(
np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# plot the dataset first
ax = axes[ds_cnt, 0]
if ds_cnt == 0:
ax.set_title("Input data")
# plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
# iterate over classifiers
for est_idx, (name, (estimator, param_grid)) in \
enumerate(zip(names, classifiers)):
ax = axes[ds_cnt, est_idx + 1]
clf = GridSearchCV(estimator=estimator, param_grid=param_grid)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print('%s: %.2f' % (name, score))
# plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]*[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name.replace(' + ', '\n'))
ax.text(0.95, 0.06, ('%.2f' % score).lstrip('0'), size=15,
bbox=dict(boxstyle='round', alpha=0.8, facecolor='white'),
transform=ax.transAxes, horizontalalignment='right')
plt.tight_layout()
# Add suptitles above the figure
plt.subplots_adjust(top=0.90)
suptitles = [
'Linear classifiers',
'Feature discretization and linear classifiers',
'Non-linear classifiers',
]
for i, suptitle in zip([1, 3, 5], suptitles):
ax = axes[0, i]
ax.text(1.05, 1.25, suptitle, transform=ax.transAxes,
horizontalalignment='center', size='x-large')
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/experimental/enable_halving_search_cv.py | 11 | 1226 | """Enables Successive Halving search-estimators
The API and results of these estimators might change without any deprecation
cycle.
Importing this file dynamically sets the
:class:`~sklearn.model_selection.HalvingRandomSearchCV` and
:class:`~sklearn.model_selection.HalvingGridSearchCV` as attributes of the
`model_selection` module::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from sklearn.model_selection import HalvingGridSearchCV
The ``# noqa`` comment can be removed: it just tells linters like
flake8 to ignore the import, which appears as unused.
"""
from ..model_selection._search_successive_halving import (
HalvingRandomSearchCV,
HalvingGridSearchCV
)
from .. import model_selection
# use settattr to avoid mypy errors when monkeypatching
setattr(model_selection, "HalvingRandomSearchCV",
HalvingRandomSearchCV)
setattr(model_selection, "HalvingGridSearchCV",
HalvingGridSearchCV)
model_selection.__all__ += ['HalvingRandomSearchCV', 'HalvingGridSearchCV']
| bsd-3-clause |
Alexsaphir/TP_EDP_Python | TP5.py | 1 | 1683 | # -*- coding: utf-8 -*-
# Fichier tp3.py
from numpy import * # importation du module numpy
from numpy.linalg import * # importation du module numpy.linalg
from matplotlib.pyplot import * # importation du module matplotlib.pyplot
from mpl_toolkits.mplot3d import Axes3D # importation du module mpl_toolkits.mplot3d
import time
from pylab import *
def U0(X):
Y = zeros(shape(X))
Y=sin(pi*X)+.25*sin(10.*pi*X)
return Y
def U1(X):
Y = zeros(shape(X))
return Y
def solex(X,ct):
return sin(pi*X)*cos(ct*pi)+.25*sin(10.*pi*X)*cos(10.*ct*pi)
print('Choix de la vitesse de transport c')
#c = float(input('c = '))
c = -2
Ns = int(1/.002)
h = 1./(Ns + 1.)
X = linspace(0.,1.,Ns+1)
Xh = X[0:Ns]
dt = .00101
T=1.
M = int((T/dt) - 1)
meth = 2
#Uj: solution at the current time step
#Ujm: solution at the previous time step
#Ujn: solution at the next time step
Uj = U0(Xh)
Ujm = zeros(shape(Uj))
Ujn = zeros(shape(Uj))
#Iteration 1
Ujn = Uj+dt*U1(Xh)
Uj, Ujm = Ujm, Uj
Ujn, Uj = Uj, Ujn
A= diag(-ones(Ns-1),1)-diag(ones(Ns-1),-1)+2.*eye(Ns)
A=A/h/h
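# Note on the discretization (a reading of the code below): A is the standard
# 1D finite-difference Laplacian with Dirichlet boundaries,
#   (A u)_i = (2*u_i - u_{i-1} - u_{i+1}) / h^2,
# and the two time-stepping schemes for the wave equation are
#   meth == 1 (explicit leapfrog): U^{n+1} = 2*U^n - U^{n-1} - (c*dt)^2 * A U^n
#   meth == 2 (implicit):          (I + (c*dt)^2 * A) U^{n+1} = 2*U^n - U^{n-1}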
#Error
Err = 0
Errn = 0
#line1, = plot(linspace(0,1,100), solex(linspace(0,1,100),T), label = 'sol exacte')
for j in arange(1, M):
if( meth == 1):
for i in arange(1,Ns):
Ujn = 2.*Uj-Ujm-c*c*dt*dt*(A.dot(Uj))
if( meth == 2 ):
Ujn = solve( (eye(Ns)+c*c*dt*dt*A), 2.*Uj-Ujm)
	#Compute the error
U=solex(Xh,j*dt*c)
Errn = amax(absolute(U - Ujn))
if (Err < Errn):
Err = Errn
Uj, Ujm = Ujm, Uj
Ujn, Uj = Uj, Ujn
plot(Xh, Uj,label="Approché")
plot(linspace(0,1,500),solex(linspace(0,1,500),T*c),label='exacte')
xlabel('X')
ylabel('Y')
legend()
show()
disp(Err)
| lgpl-3.0 |
larsmans/scikit-learn | sklearn/cross_decomposition/cca_.py | 18 | 3129 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data?
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
wlamond/scikit-learn | sklearn/manifold/setup.py | 43 | 1283 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.pyx"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
ErillLab/CogsNormalizedPosteriorProbabilityThetas | igc_pipeline.py | 1 | 39929 | # -*- coding: utf-8 -*-
"""
This script automates processing of the IGC dataset.
Created on Tue Mar 17 19:13:08 2015
@author: Talmo
"""
import os
import sys
import time
import ast
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from Bio import SeqIO
from PSSMScorer import PSSMScorer
#%% Configuration
# Output verbosity
verbosity = 1 # 0 = no output, 1 = minimal, 2 = debugging
# Base paths
base_path = "/home/cuda/2TB/metagenomics/" # metagenomics folder
#script_dir_path = os.path.dirname(os.path.abspath(__file__)) + os.path.sep
script_dir_path = base_path+"/mg_pipeline/" + os.path.sep
IGC_path = base_path + "IGC/"
alt_IGC_path = "/media/sf_D_DRIVE/metagenomics/IGC/" # 4TB drive
# Pipeline data paths
samples_index_path = script_dir_path + "data/samples_index.csv"
eggnog_tax_path = script_dir_path + "data/eggnogv4_taxonomy.csv"
# IGC Data paths
gene_summary_path = IGC_path + "3.IGC.AnnotationInfo/IGC.annotation.summary.v2"
scaftigs_path = IGC_path + "4.IndividualAssmeblies/"
orfs_fasta_path = IGC_path + "5.IndividualORFs/"
# Processed data paths
genes_path = IGC_path + "Genes/"
orfs_path = IGC_path + "ORFs/"
#operons_path = IGC_path + "Operons/"
operons_path = alt_IGC_path+"Operons/" ####################################################################
# PSSM scoring
binding_sites_path = base_path + "binding_sites/"
Firmicutes_LexA = PSSMScorer(binding_sites_path + "Firmicutes_LexA.txt")
Firmicutes_LexA.initialize_estimator(bg_mu=-17.681878, bg_sigma=8.2267625)
GammaProteobacteria_LexA = PSSMScorer(binding_sites_path + "GammaProteobacteria_LexA.txt")
GammaProteobacteria_LexA.initialize_estimator(bg_mu=-21.660313, bg_sigma=8.476820)
Grampos_LexA = PSSMScorer(binding_sites_path + "Grampositive_LexA.txt")
Firmicutes_Actino_LexA = PSSMScorer(binding_sites_path + "Firmicutes_Actinobacteria_LexA.txt")
YrkD_PSSM = PSSMScorer(binding_sites_path + "Bacillales_YrkD.txt", "YrkD")
YrkD_PSSM.initialize_estimator(bg_mu=-16.998722, bg_sigma=7.202773)
CsoR_PSSM = PSSMScorer(binding_sites_path + "Bacillales_CsoR.txt", "CsoR")
CsoR_PSSM.initialize_estimator(bg_mu=-16.362418, bg_sigma=6.652658)
CopY_PSSM = PSSMScorer(binding_sites_path + "CopY.txt", "CopY")
CopY_PSSM.initialize_estimator(bg_mu=-27.392480, bg_sigma=10.159172)
# Score results
scores_path = alt_IGC_path + "Scores/"
# Operon prediction
threshold_IGI = 50 # max intergenic interval (bp)
promoter_region = (-250, +50) # bp relative to gene start
# BLASTing
blast_columns = ["query", "hit", "percentage_identity", "alignment_length", "num_mismatches", "num_gap_openings", "q.start", "q.end", "s.start", "s.end", "e_value", "score_bits"]
#%% General functions
# From the timeit module:
if sys.platform == 'win32':
time = time.clock # On Windows, the best timer is time.clock
else:
time = time.time # On most other platforms the best timer is time.time
def log(msg, start_time=None, verbosity_level=1):
""" Convenience function for printing pipeline messages.
Args:
msg:
the message string
start_time:
if provided, displays time elapsed since this time
verbosity_level:
if the verbosity is less than this, nothing will be printed
Example:
>>> start_time = time()
>>> log("Done processing.", start_time)
Done processing. [10.94s] """
if verbosity_level <= verbosity:
if start_time is not None:
msg = msg + " [%.2fs]" % (time() - start_time)
print msg
#%% Samples and paths
# Load pre-processed samples index
samples_index = pd.read_csv(samples_index_path, index_col="sample").dropna()
all_samples = samples_index.index.tolist()
# Build dictionary mapping all sample names and aliases to a unique sample name
sample_aliases = samples_index.index.to_series().to_dict()
sample_aliases.update(samples_index.alias.reset_index().set_index("alias").sample.to_dict())
def get_unique_sample_name(query_sample, error_not_found=False):
""" Returns a unique sample name by resolving ambiguity with aliases.
Args:
query_sample: str
Sample name or alias
error_not_found: bool, default False
If True, raises LookupError if query_sample is not found.
Returns:
unique_sample: str
Unique and unambiguous sample name or None.
This can be used to index into the samples_index table.
Example:
>>> get_unique_sample_name("MH192")
'MH0192'
>>> samples_index.loc[get_unique_sample_name("MH192")]
...
"""
if query_sample in sample_aliases.keys():
return sample_aliases[query_sample]
elif error_not_found:
raise LookupError("Sample not found: %s" % query_sample)
else:
return None
def get_sample_info(sample):
""" Returns information about a sample.
Args:
sample: str
Sample name or alias.
Returns:
sample_info: Series
Sample information from samples_index table, e.g.:
alias, SRA ID, BioSample ID, study, NCBI taxon ID/name, ...
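    Example (illustrative; assumes "MH0192" is present in the samples index):
        >>> sample_info = get_sample_info("MH0192")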
"""
sample_name = get_unique_sample_name(sample)
sample_info = pd.Series({"sample": sample_name}).append(samples_index.loc[sample_name])
return sample_info
def get_sample_paths(sample, original_paths=False):
""" Returns paths to sample data files.
Args:
sample: str
Sample or alias. Raises an exception if not found.
use_original: bool, default False
If True, uses the original filenames from the downloaded files.
If False, assumes ``standardize_paths()`` has renamed the files.
Returns:
paths: Series
Absolute paths to the files associated with the sample. Keys:
:scaftigs: Assembled scaftigs (in ``scaftigs_path``)
:ORFs_fasta: ORF FASTA files (in ``orfs_fasta_path``)
:genes: Summary of genes in sample (see ``save_sample_genes()``)
:ORFs: ORF table without sequences (see ``parse_ORFs()``)
:operons: Predicted operons (see ``predict_operons()``)
Example:
>>> get_sample_paths('MH192')
scaftigs /home/cuda/2TB/metagenomics/IGC/4.IndividualAs...
ORFs_fasta /home/cuda/2TB/metagenomics/IGC/5.IndividualOR...
genes /home/cuda/2TB/metagenomics/IGC/Genes/MH0192.csv
ORFs /home/cuda/2TB/metagenomics/IGC/ORFs/MH0192.csv
operons /home/cuda/2TB/metagenomics/IGC/Operons/MH0192...
Name: MH0192, dtype: object
"""
# Get unique sample name
sample = get_unique_sample_name(sample, True)
# Initialize paths Series
paths = pd.Series(name=sample)
# IGC data
if original_paths:
paths["scaftigs"] = scaftigs_path + samples_index.at[sample, "Assemblies_filenames"]
paths["ORFs_fasta"] = orfs_fasta_path + samples_index.at[sample, "ORFs_filenames"]
else:
paths["scaftigs"] = scaftigs_path + sample + ".fna"
paths["ORFs_fasta"] = orfs_fasta_path + sample + ".fna"
# Processed data
paths["genes"] = genes_path + sample + ".csv"
paths["ORFs"] = orfs_path + sample + ".csv"
paths["operons"] = operons_path + sample + ".csv"
# Make sure paths are absolute
paths = paths.apply(os.path.abspath)
return paths
def standardize_paths(sample):
""" Renames the data files for a sample to their unique sample name.
Affects scaftigs (assemblies) and ORF sequence FASTA files.
Args:
sample: str
Sample or alias
"""
# Get paths
original_paths = get_sample_paths(sample, original_paths=True)
new_paths = get_sample_paths(sample, original_paths=False)
# Rename scaftigs
if not os.path.exists(new_paths["scaftigs"]):
if os.path.exists(original_paths["scaftigs"]):
try:
os.rename(original_paths["scaftigs"], new_paths["scaftigs"])
log("Renamed assembled scaftig for sample %s." % sample, verbosity_level=2)
except:
log("Could not rename original assembled scaftig for sample %s." % sample, verbosity_level=1)
else:
log("Could not find original assembled scaftig for sample %s." % sample, verbosity_level=1)
# Rename ORF sequences
if not os.path.exists(new_paths["ORFs_fasta"]):
if os.path.exists(original_paths["ORFs_fasta"]):
try:
os.rename(original_paths["ORFs_fasta"], new_paths["ORFs_fasta"])
log("Renamed ORF sequences for sample %s." % sample, verbosity_level=2)
except:
log("Could not rename original ORF sequences for sample %s." % sample, verbosity_level=1)
else:
log("Could not find original ORF sequences for sample %s." % sample, verbosity_level=1)
def has_data(sample):
""" Checks if the sample has its data files.
Args:
sample: str
The sample to be queried
Returns:
data_exists: Series
Same keys as returned by ``get_sample_paths()`` but will be True
for the ones for which the file exists.
"""
return get_sample_paths(sample).apply(os.path.exists)
def check_samples_data():
""" Returns a table of all samples and which data files are present. """
return pd.Series(all_samples, index=all_samples).apply(has_data)
#def get_valid_samples():
# """ Returns a list of all samples that have all data files present. """
# valid = check_samples_data().all(1)
# return valid.index[valid].tolist()
def get_MetaHit(study=2010):
""" Returns the names of the samples from the MetaHit database.
Args:
study: int or str, default 2010
Year of the study to return. Studies:
:2010: Qin et al. (2010) | doi:10.1038/nature08821
:2012: Qin et al. (2012) | doi:10.1038/nature11450
:2013: Le Chatelier et al. (2013) | doi:10.1038/nature12506
:"all": Returns all studies.
Returns:
MetaHit_samples: list
The samples corresponding to the study selected
"""
queries = {"2010": "A human gut microbial gene catalogue", \
"2012": "A metagenome-wide association", \
"2013": "Richness of human gut"}
if study == 'all':
return [item for inner_list in [get_MetaHit(k) for k in queries.keys()] for item in inner_list]
return samples_index[samples_index.study.str.contains(queries[str(study)])].index.tolist()
def get_all_samples(HMP=True):
""" Returns a list of all samples with data.
Args:
HMP: bool, default True
If False, filters out the samples that belong to the Human
Microbiome Project.
"""
if not HMP:
from_HMP = samples_index.study.str.contains("Human Microbiome Project")
return samples_index.index[~from_HMP].index.tolist()
return all_samples
#def standardize_paths(sample):
#%% Genes processing
# Regular expression that matches all sample names and aliases
samples_regex = "(" + "|".join(sample_aliases.keys()) + ")"
def load_gene_summary(limit=None, extract_samples=False):
""" Loads the integrated gene summary.
Warning: This may take a while as there are 9879896 rows in the table.
Args:
limit: int, default None
The number of rows to return. If None, returns all.
extract_sample: bool, default False
Tries to extract the sample that each gene comes from and appends
it to the "sample" column (see ``extract_sample()``).
Returns:
gene_summary: DataFrame
The IGC gene summaries table indexed by gene_id.
Example:
>>> print load_gene_summary(1)
================ =========== ============ ============= ======= ======= ====== ======= ============== =============== ================ ======================================================================= ================
gene_name gene_length completeness cohort_origin phylum genus kegg eggNOG sample_freq individual_freq eggNOG_funccat kegg_funccat cohort_assembled
================ =========== ============ ============= ======= ======= ====== ======= ============== =============== ================ ======================================================================= ================
T2D-6A_GL0083352 88230 Complete CHN unknown unknown K01824 COG5184 0.224151539069 0.236448598131 Lipid Metabolism Cell cycle control, cell division, chromosome partitioning;Cytoskeleton EUR;CHN;USA
================ =========== ============ ============= ======= ======= ====== ======= ============== =============== ================ ======================================================================= ================
"""
t = time()
gene_summary_cols = ['gene_id', 'gene_name', 'gene_length', 'completeness', \
'cohort_origin', 'phylum', 'genus', 'kegg', 'eggNOG', 'sample_freq', \
'individual_freq', 'eggNOG_funccat', 'kegg_funccat', 'cohort_assembled']
gene_summary = pd.read_table(gene_summary_path, names=gene_summary_cols, index_col='gene_id', nrows=limit)
log("Loaded %d genes from summary." % (gene_summary.shape[0]), t)
if extract_samples:
gene_summary["sample"] = extract_sample(gene_summary["gene_name"])
return gene_summary
def extract_sample(gene_names):
""" Returns samples given one or more gene_names.
Args:
gene_names: str, list, dict, Series
One or more gene_names
Returns:
samples: str, list, dict, Series
Returns a Series if input was not of above types.
Example:
>>> genes = load_sample_genes('MH192')
>>> extract_sample(genes.index)
0 MH0192
...
21241 MH0192
Name: gene_name, Length: 21242, dtype: object
>>> extract_sample(['MH0192_GL0000004', 'MH0192_GL0000005', 'MH0193_GL0000001'])
['MH0192', 'MH0192', 'MH0193']
"""
    # Remember the input type so the result can be returned in kind
    original_type = type(gene_names)
    # Convert to Series
    if original_type != pd.Series:
        gene_names = pd.Series(gene_names)
# Match against regex
t = time()
samples = gene_names.str.extract(samples_regex)
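    # Use verbosity level 2 (shown only when very verbose) if the extraction took under a second, level 1 otherwise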
v = [1, 2][(time() - t) < 1.0]
log("Extracted sample regex.", t, v)
# Get unique names for samples
t = time()
samples = samples.apply(get_unique_sample_name)
v = [1, 2][(time() - t) < 1.0]
log("Disambiguated sample names.", t, v)
if original_type == str:
return samples.ix[0]
elif original_type == list:
return samples.tolist()
elif original_type == dict:
return samples.to_dict()
else:
return samples
def save_sample_genes(overwrite=False, gene_summary=None):
""" Splits the integrated gene summary into individual tables for each
sample.
To load these files, see ``load_sample_genes()``.
See ``genes_path`` for path to data files.
Args:
overwrite: bool, default False
If True, will overwrite pre-existing gene files.
gene_summary: DataFrame, default None
Accepts a pre-loaded gene_summary DataFrame (see
``load_gene_summary()``). If None, calls that function to load it.
"""
# Create parent folder if it does not exist
if not os.path.exists(genes_path):
os.makedirs(genes_path)
# Load the combined gene summary table
if gene_summary is None:
gene_summary = load_gene_summary()
# Extract samples from gene names
if "sample" not in gene_summary.columns:
gene_summary["sample"] = extract_sample(gene_summary["gene_name"])
# Drop genes without sample
n_total = len(gene_summary)
gene_summary = gene_summary[~gene_summary.sample.isnull()]
n_after = len(gene_summary)
log("%d/%d genes without sample (%.2f%%)." % (n_total - n_after, n_total, 100.0*(n_total - n_after)/n_total), None, 2)
empty_samples = samples_index[~(samples_index.Assemblies_filenames.isnull() | samples_index.ORFs_filenames.isnull())].index.diff(gene_summary.sample.unique())
log("%d/%d samples without genes (%.2f%%)." % (len(empty_samples), len(samples_index), 100.0*len(empty_samples)/len(samples_index)), None, 2)
# Group by sample and save data
t = time()
gene_summary = gene_summary.groupby("sample", sort=False)
for sample, sample_genes in gene_summary:
sample_genes_path = get_sample_paths(sample).genes
if overwrite or not os.path.exists(sample_genes_path):
sample_genes.to_csv(sample_genes_path)
log("Saved genes for %d samples individually." % gene_summary.ngroups, t)
def load_sample_genes(sample):
""" Loads the genes table for a sample.
See ``save_sample_genes()`` for table creation.
Args:
sample: str
Sample name or alias
Returns:
genes: DataFrame
Listing and information about each called gene in the sample.
"""
# Get path
sample_genes_path = get_sample_paths(sample).genes
# Check if it exists
if not os.path.exists(sample_genes_path):
raise EnvironmentError("Genes file for %s does not exist: %s" % (sample, sample_genes_path))
# Load and return
    sample_genes = pd.read_csv(sample_genes_path, index_col="gene_name")
sample_genes.sort_index(inplace=True)
return sample_genes
def get_genes(sample):
""" Loads the genes table for a sample. Shortcut for
``load_sample_genes()``.
See ``save_sample_genes()`` for table creation.
Args:
sample: str
Sample name or alias
Returns:
genes: DataFrame
Listing and information about each called gene in the sample.
"""
return load_sample_genes(sample)
#%% Scaftigs
def get_scaftigs(sample, index_only=False, convert_to_str=True):
""" Loads the assembled scaftig nucleotide sequences.
Args:
sample: str
Sample name or alias
index_only: bool, default False
If True, will read in the whole file and parse it into a dict.
If False, will only index the file. This uses less memory by not
loading sequences into memory (see Bio.SeqIO.index).
convert_to_str: bool, default True
Converts Bio.Seq objects to strings. This improves performance
by avoiding the overhead of Bio.Seq objects at the cost of utility
functions in Bio.Seq. Only applicable when index_only is False.
Returns:
scaftigs: dict or dict-like
The scaftig sequences indexed by their name.
Returns None if not found.
"""
t = time()
# Get unique sample and paths
sample = get_unique_sample_name(sample)
file_path = get_sample_paths(sample)["scaftigs"]
# Check if file exists
if not os.path.exists(file_path):
log("Could not find scaftigs for sample %s in: %s" % (sample, file_path), verbosity_level=1)
return
if index_only:
# Create file index
scaftigs = SeqIO.index(file_path, "fasta")
log("Indexed scaftigs for sample %s." % sample, t, 2)
else:
# Read and parse file
scaftigs = SeqIO.to_dict(SeqIO.parse(file_path, "fasta"))
# Downcast to strings
if convert_to_str:
for scaf, seq in scaftigs.items():
scaftigs[scaf] = str(seq.seq).upper()
log("Read and parsed scaftigs for sample %s." % sample, t, 2)
return scaftigs
#%% ORFs
def get_ORF_seqs(sample, index_only=False, convert_to_str=True):
""" Loads the ORF FASTA nucleotide sequences.
Args:
sample: str
Sample name or alias
index_only: bool, default False
If True, will read in the whole file and parse it into a dict.
If False, will only index the file. This uses less memory by not
loading sequences into memory (see Bio.SeqIO.index).
convert_to_str: bool, default True
Converts Bio.Seq objects to strings. This improves performance
by avoiding the overhead of Bio.Seq objects at the cost of utility
functions in Bio.Seq. Only applicable when index_only is False.
Returns:
ORF_seqs: dict or dict-like
The ORF sequences indexed by their name in the ORFs table.
Returns None if not found.
"""
t = time()
# Get unique sample and paths
sample = get_unique_sample_name(sample)
file_path = get_sample_paths(sample)["ORFs_fasta"]
# Check if file exists
if not os.path.exists(file_path):
log("Could not find ORF sequences for sample %s in: %s" % (sample, file_path), verbosity_level=1)
return
if index_only:
# Create file index
ORF_seqs = SeqIO.index(file_path, "fasta")
log("Indexed ORF sequences for sample %s." % sample, t, 2)
else:
# Read and parse file
ORF_seqs = SeqIO.to_dict(SeqIO.parse(file_path, "fasta"))
# Downcast to strings
if convert_to_str:
for orf, seq in ORF_seqs.items():
ORF_seqs[orf] = str(seq.seq).upper()
log("Read and parsed ORF sequences for sample %s." % sample, t, 2)
return ORF_seqs
def parse_ORFs(sample, overwrite=False):
""" Parses the FASTA headers in the ORF FASTA file and saves the data.
Args:
sample: str
Sample name or alias
overwrite: boolean, default False
If True, will overwrite saved ORF table, otherwise will attempt to
load the table from cache.
Returns:
ORFs: DataFrame
Table with data on the open reading frames and the columns:
gene_name, orf_type, scaffold, start, end, completeness
Example:
>>> ORFs = parse_ORFs('MH0001')
Parsed 21452 ORFs from MH0001. [0.44s]
>>> ORFs.head(1)
...
"""
t = time()
# Get unique sample and paths
sample = get_unique_sample_name(sample)
sample_paths = get_sample_paths(sample)
# Check for cache
if not overwrite and os.path.exists(sample_paths["ORFs"]):
ORFs = pd.read_csv(sample_paths["ORFs"], index_col=0)
log("Loaded %d cached ORFs from %s." % (ORFs.shape[0], sample), t)
return ORFs
# Read in FASTA headers
with open(sample_paths["ORFs_fasta"]) as f:
lines = [line[1:-1] for line in f if line[0] == '>']
# Check header format
ORF_pattern = '(?P<gene_name>[^ ]+)[ ]+\\[(?P<orf_type>[^\]]+)\\][ ]+locus=(?P<scaffold>[^:]+):(?P<start>[^:]+):(?P<end>[^:]+):(?P<strand>[^:\[]+)\\[(?P<completeness>[^\[\]]+)'
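    # Headers without a "]" use an alternative tab-separated format (strand/start/stop/length/codons/gene_type)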
if "]" not in lines[0]:
ORF_pattern = '(?P<gene_name>[^\t]+)\tstrand:(?P<strand>[+-]) start:(?P<start>\d+) stop:(?P<end>\d+) length:(?P<length>\d+) start_codon:(?P<start_codon>\w+) stop_codon:(?P<stop_codon>\w+) gene_type:(?P<gene_type>\w+)'
# Parse FASTA headers for ORFs
ORFs = pd.Series(lines).str.extract(ORF_pattern).convert_objects(convert_numeric=True)
# Standardize alternative format
if "]" not in lines[0]:
# completeness
ORFs.loc[(ORFs.start_codon == 'no') & (ORFs.stop_codon == 'no'), 'completeness'] = "Lack both ends"
ORFs.loc[(ORFs.start_codon == 'yes') & (ORFs.stop_codon == 'no'), 'completeness'] = "Lack 3'-end"
ORFs.loc[(ORFs.start_codon == 'no') & (ORFs.stop_codon == 'yes'), 'completeness'] = "Lack 5'-end"
ORFs.loc[(ORFs.start_codon == 'yes') & (ORFs.stop_codon == 'yes'), 'completeness'] = "Complete"
# orf_type
ORFs.loc[:, "orf_type"] = "gene"
# scaffold
ORFs.loc[:, "scaffold"] = ORFs.gene_name.str.extract("(?P<gene_name>.+)_gene")
# Re-order columns
ORFs = ORFs.loc[:, ['gene_name', 'orf_type', 'scaffold', 'start', 'end', 'strand', 'completeness']]
# Set gene_name as index
ORFs.set_index("gene_name", inplace=True)
# Create parent folder if it does not exist
if not os.path.exists(orfs_path):
os.makedirs(orfs_path)
# Save
ORFs.to_csv(sample_paths["ORFs"])
log("Parsed %d ORFs from %s." % (ORFs.shape[0], sample), t)
return ORFs
def get_ORFs(sample):
""" Loads the ORFs table for a sample. See ``parse_orfs()`` to create an
ORFs table for a sample.
Args:
sample: str
Sample name or alias
Returns:
ORFs: DataFrame
Table with a listing and information about each ORF predicted in
the sample.
"""
t = time()
# Get unique sample and paths
sample = get_unique_sample_name(sample)
sample_paths = get_sample_paths(sample)
# Check for cache
if os.path.exists(sample_paths["ORFs"]):
ORFs = pd.read_csv(sample_paths["ORFs"], index_col=0)
log("Loaded %d cached ORFs from %s." % (ORFs.shape[0], sample), t, 2)
return ORFs
#%% Operons
def predict_operons(sample, overwrite=False):
""" Predicts operons for the given sample.
Args:
sample: str
Sample for which to predict operons.
overwrite: bool, default False
If False, loads predictions from disk if they exist.
"""
t = time()
# Get paths
sample_paths = get_sample_paths(sample)
# Check for cache
if not overwrite and os.path.exists(sample_paths["operons"]):
operons = pd.read_csv(sample_paths["operons"], index_col=0, converters={'genes':ast.literal_eval})
log("Loaded %d cached operons from %s." % (operons.shape[0], sample), t)
return operons
# Load sample scaffolds (scafftigs)
scaffolds = SeqIO.index(sample_paths["scaftigs"], 'fasta')
# Load the predicted open reading frames
ORFs = parse_ORFs(sample)
# Group by 'scaffold' column and do groupwise operations
ORFs = ORFs.groupby('scaffold', sort=False)
n_ORFs = ORFs.ngroups
print "Number of ORFs", n_ORFs
ocnt = 0
scnt = 0
ofname = operons_path+ sample+".csv"
print "Creating", ofname
out_fp = open(ofname, "w")
out_fp.write("operon,genes,strand,promoter_start,promoter_end,promoter_seq,head_completeness\n")
#operon_tables = []; i = 0
for scaffold_name, scaf_ORFs in ORFs:
scnt += 1
#print "Process scaffold", scnt, scaffold_name
scaf_info = scaffolds[scaffold_name]
scaf_len = len(scaf_info)
for strand, strand_ORFs in scaf_ORFs.groupby('strand', sort=False):
# Sort by start values
if strand == "-":
strand_ORFs.sort('end', ascending=False, inplace=True)
# Compute intergenic intervals
IGIs = strand_ORFs.start[:-1].values - strand_ORFs.end[1:].values
else:
strand_ORFs.sort('start', inplace=True)
# Compute intergenic intervals
IGIs = strand_ORFs.start[1:].values - strand_ORFs.end[:-1].values
# Find operon gene indices
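            # A gene becomes an operon head when its gap to the previous gene on the same strand exceeds threshold_IGI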
head_genes = np.where(IGIs > threshold_IGI)[0] + 1
operons_start = np.hstack(([0], head_genes))
operons_end = np.hstack((head_genes, [strand_ORFs.shape[0]])) #np.hstack((head_genes, [scaf_ORFs.shape[0]]))
if strand == "-":
head_gene_start = strand_ORFs.end.iloc[operons_start]
else:
head_gene_start = strand_ORFs.start.iloc[operons_start] #scaf_ORFs.start.iloc[operons_start]
if scaffold_name == "scaffold42382_1":
print scaffold_name, strand, head_gene_start
# Get promoter regions
rmult = 1
if strand == "-":
rmult = -1 # Go the other way for reverse strands
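            # Offset the head gene start by the promoter window and clip at 1 so coordinates stay on the scaffold (1-based)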
promoter_start = (head_gene_start + rmult*promoter_region[0]).clip(1).reset_index(drop=True)
promoter_end = (head_gene_start + rmult*promoter_region[1]).reset_index(drop=True)
if strand == "-":
for i in range(0, len(promoter_start)):
if promoter_start[i] > scaf_len:
promoter_start[i] = scaf_len
# Talmo indicated that if the operon has start>end, to swap them. So 1795,1710 becomes 1710,1795
#promoter_seq = [str(scaffolds[scaffold_name][s-1:e].seq) for s, e in zip(promoter_end, promoter_start)]
temp = promoter_start[i]
promoter_start[i] = promoter_end[i]
promoter_end[i] = temp
else:
for i in range(0, len(promoter_end)):
if promoter_end[i] > scaf_len:
promoter_end[i] = scaf_len
promoter_seq = [str(scaffolds[scaffold_name][s-1:e].seq) for s, e in zip(promoter_start, promoter_end)]
# Build operon table
strand_operons = pd.DataFrame()
strand_operons['genes'] = [strand_ORFs.index[op_start:op_end].tolist() for op_start, op_end in zip(operons_start, operons_end)]
strand_operons['strand'] = strand
strand_operons['promoter_start'] = promoter_start
strand_operons['promoter_end'] = promoter_end
strand_operons['promoter_seq'] = promoter_seq
            strand_operons['head_completeness'] = [strand_ORFs.ix[op_start, 'completeness'] for op_start in operons_start]
slen = len(strand_operons)
for i in range(0, slen):
glen = len(strand_operons['genes'][i])
gout_str = "["
for g in range(0,glen):
gout_str += "'"+strand_operons['genes'][i][g]+"', "
gout_str = gout_str[:-2] + "]"
if glen > 1:
gout_str = '"'+gout_str+'"'
out_str=str(ocnt)+","+gout_str+","+strand+","+str(promoter_start[i])+","+str(promoter_end[i])+","+promoter_seq[i]+","+strand_operons['head_completeness'][i]
out_fp.write("%s\n" % out_str)
ocnt+=1
# Append to list of operon tables
#operon_tables.append(strand_operons)
#i = i + 1
#if verbosity >= 2 and i % (n_ORFs / 5) == 0:
# log("%.2f%%: %d/%d" % (float(i) / n_ORFs * 100, i, n_ORFs), None, 2)
# Merge operon tables
#operons = pd.concat(operon_tables, ignore_index=True)
#operons.index.name = "operon"
# Create parent folder if it does not exist
#if not os.path.exists(operons_path):
# os.makedirs(operons_path)
# Save
#operons.to_csv(sample_paths["operons"])
#log("Predicted %d operons for %s." % (operons.shape[0], sample), t)
#return operons
out_fp.close()
return []
def get_operons(sample):
""" Returns the operons for a sample if it exists.
Args:
sample: str
Sample name or alias
Returns:
operons: DataFrame
Table with listing of predicted operons for a sample.
"""
# Get path
operons_path = get_sample_paths(sample)["operons"]
# Check for cache
if os.path.exists(operons_path):
t = time()
operons = pd.read_csv(operons_path, index_col=0, converters={'genes':ast.literal_eval})
log("Loaded %d cached operons from %s." % (operons.shape[0], sample), t, 2)
return operons
else:
return predict_operons(sample)
def get_genes2operon(genes, operons):
""" Returns a Series that maps gene names to the operons they belong to.
Args:
genes: DataFrame
See ``get_genes()``.
operons: DataFrame
See ``get_operons()``.
Returns:
genes2operon: Series
Operon numbers indexed by the gene names.
"""
return pd.Series(index=np.hstack(operons.genes), data=np.repeat(operons.index.values, operons.genes.map(len)))
#%% BLASTing
def get_bash_array(samples):
""" Returns a bash array of the samples.
Args:
samples: list
List of sample names.
Returns:
bash_array: str
A formatted bash array containing all samples names as strings."""
return 'samples=("' + '" "'.join(samples) + '")'
#%% PSSM scoring
def score_sample(sample, PSSM, overwrite=False, soft_max=False):
""" Scores the promoters of all the operons in a sample using the PSSM.
Args:
sample: str
Name of the sample to score
PSSM: PSSMScorer
PSSM to use for scoring
overwrite: bool, default False
If False, will load cached scores from disk
Returns:
scores: DataFrame
The scores for the sample
"""
# Validate sample name
sample = get_unique_sample_name(sample)
# Get data file path
pssm_scores_path = scores_path + PSSM.name + "/"
sample_scores_path = pssm_scores_path + sample + ".h5"
# Check for cache
if not overwrite and os.path.exists(sample_scores_path):
print "Have scores"
return get_sample_scores(sample, PSSM)
# Load operons for sample
operons = get_operons(sample)
# Extract promoters and capitalize
seqs = operons.promoter_seq.str.upper()
# Score all the promoter sequences
t = time()
scores = seqs.apply(PSSM.score).apply(pd.Series, args=([["+","-"]]))
log("Scored %s: %d sequences, %d bp." % (sample, len(seqs), seqs.apply(len).sum()), t, 1)
# Compute soft-max
if soft_max:
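        # log-sum-exp over the +/- strand scores: a smooth maximum across both strands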
scores["soft_max"] = scores.applymap(np.exp).sum(1).map(np.log)
# Check if containing folder exists
if not os.path.exists(pssm_scores_path):
os.makedirs(pssm_scores_path)
# Check if scores file exists
if os.path.exists(sample_scores_path):
os.remove(sample_scores_path) # delete existing
# Save to HDF5
t = time()
scores.to_hdf(sample_scores_path, "table", format="fixed", append=False, mode="w")
log("Saved scores for %s: %s" % (sample, sample_scores_path), t, 1)
return scores
def get_scores_path(sample, PSSM_name):
""" Returns the path to the scores for the given sample and PSSM name. """
# Get name from PSSM instance
if isinstance(PSSM_name, PSSMScorer):
PSSM_name = PSSM_name.name
# Validate sample name
sample = get_unique_sample_name(sample)
# Get data file path
pssm_scores_path = scores_path + PSSM_name + "/"
sample_scores_path = pssm_scores_path + sample + ".h5"
return sample_scores_path
def get_sample_scores(sample, PSSM_name, soft_max=True):
""" Loads scores for a given sample and PSSM name. """
# Get path to scores
sample_scores_path = get_scores_path(sample, PSSM_name)
# Check if file exists
if not has_score(sample, PSSM_name):
raise EnvironmentError("Scores could not be found.")
# Load scores
t = time()
scores = pd.read_hdf(sample_scores_path, "table")
# Take soft max
if soft_max:
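        # Collapse the two strand columns with log-sum-exp (smooth maximum)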
scores = scores.applymap(np.exp).sum(1).map(np.log)
log("Loaded cached %s scores for %s." % (PSSM_name, sample), t, 2)
return scores
def has_score(sample, PSSM_name):
""" Returns True if the sample has scores under a PSSM name. """
# Check if file exists
return os.path.exists(get_scores_path(sample, PSSM_name))
def get_all_with_scores(PSSM_name):
""" Returns a list with the names of all samples that have a score for the
given PSSM name. """
return [sample for sample in get_all_samples() if has_score(sample, PSSM_name)]
#%% Summary stats
def get_sample_summary(sample, PSSM):
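    """ Summarizes a sample: counts of operons, ORFs and genes, annotation coverage, and PSSM hits above hit_threshold. """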
sample = get_unique_sample_name(sample)
# Load sample data
genes = load_sample_genes(sample)
ORFs = parse_ORFs(sample)
operons = predict_operons(sample)
scores = get_sample_scores(sample, PSSM)
stats = pd.Series()
stats["operons"] = len(operons)
stats["ORFs"] = len(ORFs)
stats["genes"] = len(genes)
stats["has_phylum"] = (genes.phylum != "unknown").sum()
stats["has_COG"] = (genes.eggNOG != "unknown").sum()
stats["has_both"] = ((genes.phylum != "unknown") & (genes.eggNOG != "unknown")).sum()
# Find hits
hit_threshold = 8.0
hits = scores.applymap(lambda x: x >= hit_threshold)
stats["hit_sites"] = hits.applymap(sum).sum(1).sum()
hit_operons = hits.applymap(sum).sum(1) > 0
stats["hit_operons"] = hit_operons.sum()
hit_ORFs = np.hstack(operons.loc[hit_operons, "genes"])
stats["hit_ORFs"] = len(hit_ORFs)
hit_genes = genes.index.intersection(hit_ORFs)
stats["hit_genes"] = len(hit_genes)
stats["hit_has_phylum"] = (genes.loc[hit_genes, "phylum"] != "unknown").sum()
stats["hit_has_COG"] = (genes.loc[hit_genes, "eggNOG"] != "unknown").sum()
stats["hit_has_both"] = ((genes.loc[hit_genes, "eggNOG"] != "unknown") & (genes.loc[hit_genes, "phylum"] != "unknown")).sum()
return stats
def get_samples_summary(samples="all", PSSM=Firmicutes_LexA):
if samples == "all":
samples = get_all_samples()
stats = pd.Series(samples).apply(lambda x: get_sample_summary(x, PSSM))
stats.index = samples
stats.index.name = "sample"
return stats
def do_score_test():
sample_name="MH0239"
bsite_pssm = GammaProteobacteria_LexA # Check scores_path near top
bsite_pssm = Firmicutes_LexA
rescore = True
scores = score_sample(sample_name, bsite_pssm, overwrite=rescore)
#sm_scores = get_sample_scores(sample_name, bsite, soft_max=True)
def run_predict_operons(start_at, stop_before):
samples = get_all_samples()
for sample in samples:
if not sample[0:2]=="MH":
continue
snum = int(sample[2:])
if (snum>=start_at and snum<stop_before):
print sample
the_ops = predict_operons(sample, False)
def run_nonMH_predict_operons():
samples = get_all_samples()
for sample in samples:
if sample[0:2]=="MH":
continue
print sample
the_ops = predict_operons(sample, False)
def run_scores(start_at, stop_before, TF, bsite_fname):
print "binding sites path:", binding_sites_path, "fname:", bsite_fname
bsite_pssm = PSSMScorer(binding_sites_path + bsite_fname, TF)
samples = get_all_samples()
for sample in samples:
if not sample[0:2]=="MH":
continue
snum = int(sample[2:])
if (snum>=start_at and snum<stop_before):
print sample
the_scores = score_sample(sample, bsite_pssm, False)
def run_nonMH_scores(TF, bsite_fname):
print "binding sites path:", binding_sites_path, "fname:", bsite_fname
bsite_pssm = PSSMScorer(binding_sites_path + bsite_fname, TF)
samples = get_all_samples()
for sample in samples:
if sample[0:2]=="MH": #
continue
print sample
the_scores = score_sample(sample, bsite_pssm, False)
def change_score_path(which_drive):
global scores_path
if which_drive == "4TB":
scores_path = "/media/sf_D_DRIVE/metagenomics/IGC/Scores/"
else:
scores_path = IGC_path + "Scores/"
| gpl-3.0 |
ArthurChiao/hevc-mining | training-test/plot_qp.py | 1 | 2154 | #! /usr/bin/env python
import matplotlib
import matplotlib.pyplot as plt
import math
labels = []
qp_all = []
qp_class0 = []
qp_class1 = []
log_qp_class0 = []
log_qp_class1 = []
with open("ClassBCDE.input", "r") as f:
for line in f:
items = line.strip().split()
label = items[0]
labels.append(label)
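        # Each line is "<label> <index>:<value> ..." (libsvm-style); items[2] carries the QP feature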
qp = items[2].split(":")[1]
qp_all.append(qp)
qp = int(qp) + 1 # plus 1 to avoid log(0)
if label == "+1":
qp_class0.append(qp)
log_qp_class0.append(math.log(qp, 2))
elif label == "-1":
qp_class1.append(qp)
log_qp_class1.append(math.log(qp, 2))
else:
print("error label" + label)
title0 = "Class0: coding with small CUs (8x8 or 4x4)"
title1 = "Class1: coding with big CUs (> 8x8)"
fig_id = 0
# --------------------------------------------------------------
# plot qp histogram for different classes
# --------------------------------------------------------------
fig = plt.figure(fig_id)
fig_id += 1
ax11 = fig.add_subplot(211)
n, bins, patches = plt.hist(qp_class0, normed=1)
ax11.plot(bins)
ax11.set_title(' '.join(["QP histogram of", title0]))
ax11.set_xlabel("qp")
ax11.axis([0, 51, 0, max(n) * 1.1])
ax12 = fig.add_subplot(212)
n, bins, patches = plt.hist(qp_class1, normed=1)
ax12.plot(bins)
ax12.set_title(' '.join(["QP histogram of", title1]))
ax12.set_xlabel("qp")
ax12.axis([0, 51, 0, max(n) * 1.1])
# --------------------------------------------------------------
# plot log2(qp) histogram for different classes
# --------------------------------------------------------------
fig = plt.figure(fig_id)
fig_id += 1
num_bins = 15
ax11 = fig.add_subplot(211)
n, bins, patches = plt.hist(log_qp_class0, num_bins, normed=1)
ax11.plot(bins)
ax11.set_title(' '.join(["log2(QP) histogram of", title0]))
ax11.set_xlabel("log2(qp)")
ax11.axis([1, 6, 0, max(n) * 1.1])
ax12 = fig.add_subplot(212)
n, bins, patches = plt.hist(log_qp_class1, num_bins, normed=1)
ax12.plot(bins)
ax12.set_title(' '.join(["log2(QP) histogram of", title1]))
ax12.set_xlabel("log2(qp)")
ax12.axis([1, 6, 0, max(n) * 1.1])
plt.show()
| bsd-2-clause |
MartinDelzant/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
ABcDexter/python-weka-wrapper | python/weka/plot/clusterers.py | 2 | 3696 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# clusterers.py
# Copyright (C) 2014 Fracpete (pythonwekawrapper at gmail dot com)
import logging
import weka.plot as plot
if plot.matplotlib_available:
import matplotlib.pyplot as plt
from weka.core.dataset import Instances
from weka.clusterers import ClusterEvaluation
# logging setup
logger = logging.getLogger(__name__)
def plot_cluster_assignments(evl, data, atts=None, inst_no=False, size=10, title=None, outfile=None, wait=True):
"""
Plots the cluster assignments against the specified attributes.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evl: the cluster evaluation to obtain the cluster assignments from
:type evl: ClusterEvaluation
:param data: the dataset the clusterer was evaluated against
:type data: Instances
:param atts: the list of attribute indices to plot, None for all
:type atts: list
:param inst_no: whether to include a fake attribute with the instance number
:type inst_no: bool
:param size: the size of the circles in point
:type size: int
:param title: an optional title
:type title: str
:param outfile: the (optional) file to save the generated plot to. The extension determines the file format.
:type outfile: str
:param wait: whether to wait for the user to close the plot
:type wait: bool
"""
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
fig = plt.figure()
if data.class_index == -1:
c = None
else:
c = []
for i in xrange(data.num_instances):
inst = data.get_instance(i)
c.append(inst.get_value(inst.class_index))
if atts is None:
atts = []
for i in xrange(data.num_attributes):
atts.append(i)
num_plots = len(atts)
if inst_no:
num_plots += 1
clusters = evl.cluster_assignments
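    # One scatter panel per attribute: cluster assignment on x, attribute value on y, colored by class when available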
for index, att in enumerate(atts):
x = data.values(att)
ax = fig.add_subplot(
1, num_plots, index + 1)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title(data.attribute(att).name)
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if inst_no:
x = []
for i in xrange(data.num_instances):
x.append(i+1)
ax = fig.add_subplot(
1, num_plots, num_plots)
if c is None:
ax.scatter(clusters, x, s=size, alpha=0.5)
else:
ax.scatter(clusters, x, c=c, s=size, alpha=0.5)
ax.set_xlabel("Clusters")
ax.set_title("Instance number")
ax.get_xaxis().set_ticks(list(set(clusters)))
ax.grid(True)
if title is None:
title = data.relationname
fig.canvas.set_window_title(title)
plt.draw()
if not outfile is None:
plt.savefig(outfile)
if wait:
plt.show()
| gpl-3.0 |
nimagh/CNN_Implementations | GenerativeModels/VAE.py | 1 | 7780 | # -*- coding: utf-8 -*-
'''
Auto-Encoding Variational Bayes - Kingma and Welling 2013
Use this code with no warranty and please respect the accompanying license.
'''
import sys
sys.path.append('../common')
from tools_config import data_dir, expr_dir
import os, sys, shutil
import matplotlib.pyplot as plt
from tools_train import get_train_params, OneHot, count_model_params
from tools_train import vis_square
from datetime import datetime
from tools_general import tf, np
from tools_networks import deconv, conv, dense, clipped_crossentropy, dropout
import logging
def create_encoder(Xin, is_training, latentD, reuse=False, networktype='cdaeE'):
'''Xin: batchsize * H * W * Cin
output1-2: batchsize * Cout'''
with tf.variable_scope(networktype, reuse=reuse):
Xout = conv(Xin, is_training, kernel_w=4, stride=2, Cout=64, pad=1, act='reLu', norm='batchnorm', name='conv1') # 14*14
Xout = conv(Xout, is_training, kernel_w=4, stride=2, Cout=128, pad=1, act='reLu', norm='batchnorm', name='conv2') # 7*7
Zmu_op = dense(Xout, is_training, Cout=latentD, act=None, norm=None, name='dense_mean')
z_log_sigma_sq_op = dense(Xout, is_training, Cout=latentD, act=None, norm=None, name='dense_var')
return Zmu_op, z_log_sigma_sq_op
def create_decoder(Xin, is_training, latentD, Cout=1, reuse=False, networktype='vaeD'):
with tf.variable_scope(networktype, reuse=reuse):
Xout = dense(Xin, is_training, Cout=7 * 7 * 256, act='reLu', norm='batchnorm', name='dense1')
Xout = tf.reshape(Xout, shape=[-1, 7, 7, 256]) # 7
Xout = deconv(Xout, is_training, kernel_w=4, stride=2, Cout=256, epf=2, act='reLu', norm='batchnorm', name='deconv1') # 14
Xout = deconv(Xout, is_training, kernel_w=4, stride=2, Cout=Cout, epf=2, act=None, norm=None, name='deconv2') # 28
Xout = tf.nn.sigmoid(Xout)
return Xout
def create_vae_trainer(base_lr=1e-4, latentD=2, networktype='VAE'):
'''Train a Variational AutoEncoder'''
is_training = tf.placeholder(tf.bool, [], 'is_training')
Zph = tf.placeholder(tf.float32, [None, latentD])
Xph = tf.placeholder(tf.float32, [None, 28, 28, 1])
Zmu_op, z_log_sigma_sq_op = create_encoder(Xph, is_training, latentD, reuse=False, networktype=networktype + '_Enc')
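    # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I) supplied through the Zph placeholder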
Z_op = tf.add(Zmu_op, tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq_op)), Zph))
Xrec_op = create_decoder(Z_op, is_training, latentD, reuse=False, networktype=networktype + '_Dec')
Xgen_op = create_decoder(Zph, is_training, latentD, reuse=True, networktype=networktype + '_Dec')
# E[log P(X|z)]
rec_loss_op = tf.reduce_mean(tf.reduce_sum(tf.square(tf.subtract(Xph, Xrec_op)), reduction_indices=[1, 2, 3]))
# D_KL(Q(z|X) || P(z))
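    # Closed form for a diagonal Gaussian posterior vs. N(0, I): 0.5 * sum(sigma^2 + mu^2 - 1 - log sigma^2)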
KL_loss_op = tf.reduce_mean(0.5 * tf.reduce_sum(tf.exp(z_log_sigma_sq_op) + tf.square(Zmu_op) - 1 - z_log_sigma_sq_op, reduction_indices=[1, ]))
enc_varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=networktype + '_Enc')
dec_varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=networktype + '_Dec')
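    # Total loss is the negative ELBO: reconstruction error plus the KL regularizer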
total_loss_op = tf.add(rec_loss_op , KL_loss_op)
train_op = tf.train.AdamOptimizer(learning_rate=base_lr, beta1=0.9).minimize(total_loss_op, var_list=enc_varlist + dec_varlist)
logging.info('Total Trainable Variables Count in Encoder %2.3f M and in Decoder: %2.3f M.' % (count_model_params(enc_varlist) * 1e-6, count_model_params(dec_varlist) * 1e-6,))
return train_op, total_loss_op, rec_loss_op, KL_loss_op, is_training, Zph, Xph, Xrec_op, Xgen_op, Zmu_op
if __name__ == '__main__':
networktype = 'VAE_MNIST'
batch_size = 128
base_lr = 1e-3
epochs = 100
latentD = 2
work_dir = expr_dir + '%s/%s/' % (networktype, datetime.strftime(datetime.today(), '%Y%m%d'))
if not os.path.exists(work_dir): os.makedirs(work_dir)
starttime = datetime.now().replace(microsecond=0)
log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
logging.basicConfig(filename=work_dir + '%s.log' % log_name, level=logging.DEBUG, format='%(asctime)s :: %(message)s', datefmt='%Y%m%d-%H%M%S')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
logging.info('Started Training of %s at %s' % (networktype, datetime.strftime(starttime, '%Y-%m-%d_%H:%M:%S')))
logging.info('\nTraining Hyperparamters: batch_size= %d, base_lr= %1.1e, epochs= %d, latentD= %d\n' % (batch_size, base_lr, epochs, latentD))
shutil.copy2(os.path.basename(sys.argv[0]), work_dir)
data, max_iter, test_iter, test_int, disp_int = get_train_params(data_dir, batch_size, epochs=epochs, test_in_each_epoch=1, networktype=networktype)
tf.reset_default_graph()
sess = tf.InteractiveSession()
train_op, total_loss_op, rec_loss_op, KL_loss_op, is_training, Zph, Xph, Xrec_op, Xgen_op, _ = create_vae_trainer(base_lr, latentD, networktype)
tf.global_variables_initializer().run()
var_list = [var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if (networktype.lower() in var.name.lower()) and ('adam' not in var.name.lower())]
saver = tf.train.Saver(var_list=var_list, max_to_keep=int(epochs * .1))
# saver.restore(sess, expr_dir + 'ganMNIST/20170707/214_model.ckpt')
best_test_loss = np.ones([3, ]) * np.inf
train_loss = np.zeros([max_iter, 3])
test_loss = np.zeros([int(np.ceil(max_iter / test_int)), 3])
for it in range(max_iter):
Z = np.random.normal(size=[batch_size, latentD], loc=0.0, scale=1.).astype(np.float32)
if it % test_int == 0:
acc_loss = np.zeros([1, 3])
for i_test in range(test_iter):
X, Xlabels = data.test.next_batch(batch_size)
resloss = sess.run([total_loss_op, rec_loss_op, KL_loss_op], feed_dict={Xph:X, Zph: Z, is_training:False})
acc_loss = np.add(acc_loss, resloss)
test_loss[it // test_int] = np.divide(acc_loss, test_iter)
logging.info("Epoch %4d, Iteration #%4d, testing. Test Loss [total| rec| KL] = [%s]" % (data.train.epochs_completed, it, ' | '.join(['%2.5f' % a for a in test_loss[it // test_int]])))
if test_loss[it // test_int, 0] < best_test_loss[0]:
best_test_loss = test_loss[it // test_int]
logging.info("### Best Test Results Yet. Test Loss [total| rec| KL] = [%s]" % (' | '.join(['%2.5f' % a for a in test_loss[it // test_int]])))
rec_sample, gen_sample = sess.run([Xrec_op, Xgen_op], feed_dict={Xph:X, Zph: Z , is_training:False})
vis_square(rec_sample[:121], [11, 11], save_path=work_dir + 'Rec_Iter_%d.jpg' % it)
vis_square(gen_sample[:121], [11, 11], save_path=work_dir + 'Gen_Iter_%d.jpg' % it)
saver.save(sess, work_dir + "Model_Iter_%.3d.ckpt" % it)
X, _ = data.train.next_batch(batch_size)
recloss = sess.run([total_loss_op, rec_loss_op, KL_loss_op, train_op], feed_dict={Xph:X, Zph: Z, is_training:True})
train_loss[it] = recloss[:3]
if it % disp_int == 0:
logging.info("Epoch %4d, Iteration #%4d, Train Loss [total| rec| KL] = [%s]" % (data.train.epochs_completed, it, ' | '.join(['%2.5f' % a for a in train_loss[it]])))
endtime = datetime.now().replace(microsecond=0)
logging.info('Finished Training of %s at %s' % (networktype, datetime.strftime(endtime, '%Y-%m-%d_%H:%M:%S')))
logging.info('Training done in %s ! Best Test Loss [total| rec| KL] = [%s]' % (endtime - starttime, ' | '.join(['%2.5f' % a for a in best_test_loss]))) | gpl-3.0 |
raphaelvalentin/Utils | graph/smithchart.py | 1 | 9713 | import os
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.figure import SubplotParams
from matplotlib.ticker import MaxNLocator
from os.path import isfile
import tempfile
from functions.science import linspace
__all__ = ['smithchart']
from math import floor, log10, pi
import math
import cmath
inf = float('inf')
class smithchart(object):
def __init__(self, **kwargs):
self.dpi = kwargs.get('dpi', 100)
self._caption = kwargs.get('caption', '')
self.figsize = kwargs.get('figsize', (8, 6))
self._xlabel = kwargs.get('xlabel', '')
self._ylabel = kwargs.get('ylabel', '')
self.fontsize = kwargs.get('fontsize', 19)
self._labels = list()
self._plots = list()
self.filename = kwargs.get('filename', 'image.png')
self.PY_GRAPH_DIR = os.environ.get('PY_GRAPH_DIR', '')
self.draw_cadran()
def xlabel(self, label):
self._xlabel = label
def ylabel(self, label):
self._ylabel = label
def caption(self, caption):
self._caption = caption
def draw_cadran(self):
grain=500.
        # quarter circles at constant s (constant-reactance arcs)
Teta=linspace(0.,pi/2,step=pi/grain/2.)
S=[5., 2.,1.,0.5, 0.2,-0.2, -0.5,-1.,-2.,-5, 0.]
for s in S:
data=[]
R=np.tan(Teta)
for r in R:
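                # Reflection coefficient Gamma = (z-1)/(z+1) for z = r + j*s, written out in real and imaginary parts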
d=(r+1.)**2+s**2
x=((r*r-1.)+s*s)/d
y=2*s/d
pt = complex(x,y)
if abs(pt)<1:
data.append(pt)
self.plot(np.array(data),color='grey', linestyle=':', linewidth=1)
        # draw the Smith chart
        # circles at constant r (constant-resistance circles)
Teta=linspace(-pi/2.,pi/2.,step=pi/grain/2.)
S=np.tan(Teta)
R=[0.1, .3,0.6, 1.,2., 3.,10., 0.]
for r in R:
data=[]
for s in S:
d=s**2+(r+1.)**2
x=(s**2+(r**2-1.))/d
y=2.*(s/d)
data.append(complex(x,y))
if r==0.:
self.plot(np.array(data),color='black')
else:
self.plot(np.array(data),color='grey', linestyle=':', linewidth=1)
# ticks
s = 0.0
R=[0.1, 0.2, 0.3,0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4, 1.6,1.8,2.0, 3., 4., 5., 10, 20]
for r in R:
data=[]
d=s**2+(r+1.)**2
x=(s**2+(r**2-1.))/d
y=2.*(s/d)
data.append(complex(x,y+0.01))
data.append(complex(x,y-0.01))
self.plot(np.array(data),color='black', linestyle='-', linewidth=1.5)
#
self.plot(np.array([complex(-1,0), complex(1,0)]),color='black', linestyle='-', linewidth=1.5)
#
S = [0.0, 0.1, 0.2, 0.3,0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4, 1.6,1.8,2.0, 3., 4., 5., 10, 20]
S += [-0.1, -0.2, -0.3,-0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1, -1.2, -1.4, -1.6,-1.8,-2.0, -3., -4.,-5., -10, -20]
for s in S:
data=[]
r=0
d=(r+1.)**2+s**2
x=((r*r-1.)+s*s)/d
y=2*s/d
pt = complex(x,y)
m, phi = cmath.polar(pt)
pt = cmath.rect(m*1.03, phi)
x, y = pt.real, pt.imag
pt1 = cmath.rect(m-0.02, phi)
pt2 = cmath.rect(m, phi)
data = [pt1, pt2]
self.plot(np.array(data),color='black', linestyle='-', linewidth=1.5)
def annotate(self, plt):
R=[0.1, 0.2, 0.3,0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4, 1.6,1.8,2.0, 3., 4., 5., 10, 20]
for r in R:
data=[]
s = 0.0
d=s**2+(r+1.)**2
x=(s**2+(r**2-1.))/d
y=2.*(s/d)
data.append(complex(x,y+0.01))
data.append(complex(x,y-0.01))
plt.annotate(str(r), xy=(x, y+0.07), size=10, rotation=90, va="center", ha="center", )
#
S = [0.0, 0.1, 0.2, 0.3,0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.4, 1.6,1.8,2.0, 3., 4., 5., 10, 20]
for s in S:
data=[]
r=0
d=(r+1.)**2+s**2
x=((r*r-1.)+s*s)/d
y=2*s/d
pt = complex(x,y)
m, phi = cmath.polar(pt)
m = m*1.04
pt = cmath.rect(m, phi)
x, y = pt.real, pt.imag
plt.annotate(str(s), xy=(x, y), size=10, va="center", ha="center", rotation=phi*180/pi-90 )
S = [-0.1, -0.2, -0.3,-0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1, -1.2, -1.4, -1.6,-1.8,-2.0, -3., -4.,-5., -10, -20]
for s in S:
data=[]
r=0
d=(r+1.)**2+s**2
x=((r*r-1.)+s*s)/d
y=2*s/d
pt = complex(x,y)
m, phi = cmath.polar(pt)
m = m*1.05
pt = cmath.rect(m, phi)
x, y = pt.real, pt.imag
plt.annotate(str(s), xy=(x, y), size=10, va="center", ha="center", rotation=phi*180/pi+90 )
def plot(self, c, **kwargs):
if not 'linewidth' in kwargs:
kwargs['linewidth'] = 2
if 'label' in kwargs:
if kwargs['label'] in self._labels:
del kwargs['label']
else:
self._labels.append(kwargs['label'])
self._plots.append([c.real, c.imag, kwargs])
def scatter(self, c, **kwargs):
markeredgecolor = kwargs.pop('color', 'r')
markersize = kwargs.pop('size', 6)
properties = {'marker':'s', 'markersize':markersize, 'linewidth':0,
'markerfacecolor':'none', 'markeredgecolor':markeredgecolor, 'markeredgewidth':2
}
properties.update(**kwargs)
self.plot(c, **properties)
def savefig(self, filename=None, dpi=100, force=True):
if filename == None:
filename = tempfile.mktemp(dir=self.PY_GRAPH_DIR, suffix='.png')
self.filename = filename
self.dpi = dpi
# generation of the image
plt.rc('font', family='sans-serif', size=self.fontsize)
plt.rc('figure', figsize=(8,6))
plt.rc('figure', dpi=self.dpi)
plt.rc('figure.subplot', left=0.00, bottom=0.00, right=1.0, top=1.0, wspace=0.001, hspace=0.1)
plt.rc('lines', markersize=6)
plt.rc('axes', labelsize=self.fontsize)
plt.rc('axes', color_cycle=('red', 'blue', 'green', 'black', 'grey', 'yellow'))
plt.rc('axes', grid=False)
plt.rc('axes', linewidth=0)
plt.rc('xtick.major', size=8) # major tick size in points
plt.rc('xtick.minor', size=5) # minor tick size in points
plt.rc('xtick.major', width=0) # major tick width in points
plt.rc('xtick.minor', width=0) # minor tick width in points
plt.rc('xtick.major', pad=4) # distance to major tick label in points
plt.rc('xtick', color='k') # color of the tick labels
plt.rc('xtick', labelsize=0) # fontsize of the tick labels
plt.rc('xtick', direction='in') # direction: in, out, or inout
plt.rc('ytick.major', size=1) # major tick size in points
plt.rc('ytick.minor', size=1) # minor tick size in points
plt.rc('ytick.major', width=0) # major tick width in points
plt.rc('ytick.minor', width=0) # minor tick width in points
plt.rc('ytick', labelsize=0) # fontsize of the tick labels
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1,1,1)
self.ax.set_xlim(-1.15,1.15)
self.ax.set_ylim(-1.15,1.15)
plt.axes().set_aspect('equal', 'datalim')
plt.axis('off')
self.ax.set_axis_off()
        ax_r = plt.gca()  # handle to the current axes
legend = False
for plti in self._plots:
if len(plti)==3:
(x, y, parameters) = plti
plt.plot(x, y, **parameters)
elif len(plti)==4:
(x, y, linespec, parameters) = plti
plt.plot(x, y, linespec, **parameters)
if 'label' in parameters: legend = True
if len(self._plots)>0 and (force or not(isfile(self.filename))):
if legend:
plt.legend(loc=0, prop={'size':self.fontsize})
# transparent legend
leg = self.ax.legend(loc='best', fancybox=False)
leg.get_frame().set_alpha(0.5)
self.annotate(plt)
plt.draw()
plt.savefig(self.filename, dpi=self.dpi)
plt.close(self.fig)
return self.filename, self._caption
if __name__ == '__main__':
from numpy import array
plot1 = smithchart(xlabel='s11')
s = array([ complex(-0.577832859,-0.631478424),
complex(-0.872221469,0.175553879),
complex(-0.27989901,0.848322599),
complex(0.625836677,0.630661307),
complex(0.833655352,-0.25903236),
complex(0.200238299,-0.876183465),
complex(0.091123769,-0.706343188),
complex(0.511222482,-0.249041717),
complex(0.385652964,0.223033934),
complex(-0.045832001,0.354777424),
complex(-0.245491847,0.136919746),
complex(-0.193731962,-0.091411262),
complex(-0.151810832,0.097273845),
complex(0.007344177,0.147523939),
complex(0.107016177,0.034567346),
complex(0.057517023,-0.062991385),
complex(-0.029108675,-0.061496518),
complex(0.002598262,-0.004237322) ])
plot1.plot(s, label='model')
plot1.scatter(s, label='meas.')
plot1.savefig('toto.jpg')
| gpl-2.0 |
eramirem/astroML | book_figures/chapter7/fig_S_manifold_PCA.py | 3 | 4298 | """
Comparison of PCA and Manifold Learning
---------------------------------------
Figure 7.8
A comparison of PCA and manifold learning. The top-left panel shows an example
S-shaped data set (a two-dimensional manifold in a three-dimensional space).
PCA identifies three principal components within the data. Projection onto the
first two PCA components results in a mixing of the colors along the manifold.
Manifold learning (LLE and IsoMap) preserves the local structure when
projecting the data, preventing the mixing of the colors.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
from matplotlib import ticker
from sklearn import manifold, datasets, decomposition
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# generate the S-curve dataset
np.random.seed(0)
n_points = 1100
n_neighbors = 10
out_dim = 2
X, color = datasets.samples_generator.make_s_curve(n_points)
# change the proportions to emphasize the weakness of PCA
X[:, 1] -= 1
X[:, 1] *= 1.5
X[:, 2] *= 0.5
#------------------------------------------------------------
# Compute the projections
pca = decomposition.PCA(out_dim)
Y_pca = pca.fit_transform(X)
lle = manifold.LocallyLinearEmbedding(n_neighbors, out_dim, method='modified',
random_state=0, eigen_solver='dense')
Y_lle = lle.fit_transform(X)
iso = manifold.Isomap(n_neighbors, out_dim)
Y_iso = iso.fit_transform(X)
#------------------------------------------------------------
# plot the 3D dataset
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.05, right=0.95,
bottom=0.05, top=0.9)
try:
# matplotlib 1.0+ has a toolkit for generating 3D plots
from mpl_toolkits.mplot3d import Axes3D
ax1 = fig.add_subplot(221, projection='3d',
xticks=[], yticks=[], zticks=[])
ax1.scatter(X[:, 0], X[:, 1], X[:, 2], c=color,
cmap=plt.cm.jet, s=9, lw=0)
ax1.view_init(11, -73)
except:
# In older versions, we'll have to wing it with a 2D plot
ax1 = fig.add_subplot(221)
# Create a projection to mimic 3D scatter-plot
X_proj = X / (X.max(0) - X.min(0))
X_proj -= X_proj.mean(0)
R = np.array([[0.5, 0.0],
[0.1, 0.1],
[0.0, 0.5]])
R /= np.sqrt(np.sum(R ** 2, 0))
X_proj = np.dot(X_proj, R)
# change line width with depth
lw = X[:, 1].copy()
lw -= lw.min()
lw /= lw.max()
lw = 1 - lw
ax1.scatter(X_proj[:, 0], X_proj[:, 1], c=color,
cmap=plt.cm.jet, s=9, lw=lw, zorder=10)
# draw the shaded axes
ax1.fill([-0.7, -0.3, -0.3, -0.7, -0.7],
[-0.7, -0.3, 0.7, 0.3, -0.7], ec='k', fc='#DDDDDD', zorder=0)
ax1.fill([-0.3, 0.7, 0.7, -0.3, -0.3],
[-0.3, -0.3, 0.7, 0.7, -0.3], ec='k', fc='#DDDDDD', zorder=0)
ax1.fill([-0.7, 0.3, 0.7, -0.3, -0.7],
[-0.7, -0.7, -0.3, -0.3, -0.7], ec='k', fc='#DDDDDD', zorder=0)
ax1.xaxis.set_major_locator(ticker.NullLocator())
ax1.yaxis.set_major_locator(ticker.NullLocator())
#------------------------------------------------------------
# Plot the projections
subplots = [222, 223, 224]
titles = ['PCA projection', 'LLE projection', 'IsoMap projection']
Yvals = [Y_pca, Y_lle, Y_iso]
for (Y, title, subplot) in zip(Yvals, titles, subplots):
ax = fig.add_subplot(subplot)
ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.jet, s=9, lw=0)
ax.set_title(title)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
| bsd-2-clause |
detrout/debian-statsmodels | statsmodels/examples/ex_generic_mle.py | 32 | 16462 |
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
"""
Log likelihood for the probit
"""
q = 2*endog - 1
X = exog
return np.add.reduce(stats.norm.logcdf(q*np.dot(X,params)))
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog,data.exog), maxiter=500)
print(res)
#np.allclose(res.params, probit_res.params)
print(res.params, probit_res.params)
#datal = sm.datasets.longley.load()
datal = sm.datasets.ccard.load()
datal.exog = sm.add_constant(datal.exog, prepend=False)
# Instance of GenericLikelihood model doesn't work directly, because loglike
# cannot get access to data in self.endog, self.exog
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)
show_error = False
show_error2 = 1#False
if show_error:
def loglike_norm_xb(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(method="nm", maxiter = 500)
print(res_norm.params)
if show_error2:
def loglike_norm_xb(params, endog, exog):
beta = params[:-1]
sigma = params[-1]
#print exog.shape, beta.shape
xb = np.dot(exog, beta)
#print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
method="nm", maxiter = 5000,
fargs=(datal.endog, datal.exog))
print(res_norm.params)
class MygMLE(GenericLikelihoodModel):
# just for testing
def loglike(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma).sum()
def loglikeobs(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
print(res_norm2.params)
res2 = sm.OLS(datal.endog, datal.exog).fit()
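# Use the OLS estimates and the residual standard deviation as starting values for the MLE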
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
retall=0)
print(start_params)
print(res_norm3.params)
print(res2.bse)
#print res_norm3.bse # not available
print('llf', res2.llf, res_norm3.llf)
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)
# fprime in the fit options cannot be overwritten; set it to None when score is defined
# the exception is fixed, but I don't think score was supposed to be called
'''
>>> mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None, maxiter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 316, in fit
disp=disp, retall=retall, callback=callback)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
710, in fmin_bfgs
gfk = myfprime(x0)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
103, in function_wrapper
return function(x, *args)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 240, in <lambda>
score = lambda params: -self.score(params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
return approx_fprime1(params, self.nloglike)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()
'''
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
maxiter = 500, retall=0)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
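# Finite-difference check of the Hessian at the optimum: hb/hf below use
# negative/positive steps (epsilon) and are averaged to reduce the one-sided
# truncation error of the approximation.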
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
print(np.linalg.eigh(hh))
grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print(grad)
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print((gradb+gradf)/2.)
print(res_norm3.model.score(res_norm3.params))
print(res_norm3.model.score(start_params))
mod_norm2.loglike(start_params/2.)
print(np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params)))
print(np.sqrt(np.diag(res_bfgs.cov_params())))
print(res_norm3.bse)
print("MLE - OLS parameter estimates")
print(res_norm3.params[:-1] - res2.params)
print("bse diff in percent")
print((res_norm3.bse[:-1] / res2.bse)*100. - 100)
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 993, in conf_int
lower = self.params - dist.ppf(1-alpha/2,self.model.df_resid) *\
AttributeError: 'MygMLE' object has no attribute 'df_resid'
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
print(res_norm3.model.score_obs(res_norm3.params).shape)
jac = res_norm3.model.score_obs(res_norm3.params)
print(np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params)
jac2 = res_norm3.model.score_obs(res_norm3.params, centered=True)
print(np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac)))))
print(res_norm3.bse)
print(res2.bse)
| bsd-3-clause |
noname007/losslessh264 | plot_prior_misses.py | 40 | 1124 | # Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
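# A rough sketch of the intended workflow (hypothetical build flag and file
# names; the instrumented decoder is expected to drop *_misses.log files into /tmp):
#   make CPPFLAGS=-DPRIOR_STATS        # build h264dec with prior statistics
#   ./h264dec input.264 out.yuv        # decode once, writing the logs
#   python plot_prior_misses.py        # aggregate logs into /tmp/misses.pdf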
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: map(lambda c: c == '0', open('/tmp/' + p).read()) for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
| bsd-2-clause |
sbobovyc/LabNotes | SolarViz/graph_frame.py | 1 | 2244 | """
Created on September 20, 2011
@author: sbobovyc
"""
"""
Copyright (C) 2011 Stanislav Bobovych
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
try:
# Python2
import Tkinter as tk
except ImportError:
# Python3
import tkinter as tk
import solar_graph
from matplotlib import pyplot
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
class GUI_graph_frame(tk.Frame):
def __init__(self, parent, controller):
# register with controller
self.name = "graph_frame"
self.controller = controller
self.controller.register(self, self.name)
tk.Frame.__init__(self, parent, bd=2, relief=tk.FLAT, background="grey")
# create pyplot figure
self.fig = pyplot.figure()
self.fig.subplots_adjust(hspace=0.8)
# draw figure and control bar on canvas
self.canvas = FigureCanvasTkAgg(self.fig, master=self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg( self.canvas, self )
toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
def plot_data(self, solar_data, sample_stride, title, xlabel, ylabel, xdata, ydata, xfunction=solar_graph.nop, yfunction=solar_graph.nop):
self.ax = pyplot.subplot(111)
self.solar_graph = solar_graph.graph(self.fig, self.ax, solar_data, sample_stride, title, xlabel, ylabel, xdata, ydata, xfunction, yfunction)
self.canvas.show()
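# Rough usage sketch (hypothetical parent widget, controller and data objects;
# the controller only needs a register(frame, name) method):
#   frame = GUI_graph_frame(root, controller)
#   frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
#   frame.plot_data(solar_data, 10, "Power", "time", "W", "timestamp", "power")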
| gpl-3.0 |
harshaneelhg/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
dismalpy/dismalpy | dismalpy/ssm/tests/test_varmax.py | 1 | 30495 | """
Tests for VARMAX models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.datasets import webuse
from dismalpy.ssm import varmax
from dismalpy.ssm.tests.results import results_varmax
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
from statsmodels.iolib.summary import forg
current_path = os.path.dirname(os.path.abspath(__file__))
var_path = 'results' + os.sep + 'results_var_stata.csv'
var_results = pd.read_csv(current_path + os.sep + var_path)
varmax_path = 'results' + os.sep + 'results_varmax_stata.csv'
varmax_results = pd.read_csv(current_path + os.sep + varmax_path)
class CheckVARMAX(object):
"""
Test Vector Autoregression against Stata's `dfactor` code (Stata's
`var` function uses OLS and not state space / MLE, so we can't get
equivalent log-likelihoods)
"""
def test_mle(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Fit with all transformations
# results = self.model.fit(method='powell', disp=-1)
results = self.model.fit(maxiter=100, disp=False)
# Fit now without transformations
self.model.enforce_stationarity = False
self.model.enforce_invertibility = False
results = self.model.fit(results.params, method='nm', maxiter=1000,
disp=False)
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
assert_allclose(results.llf, self.results.llf, rtol=1e-5)
def test_params(self):
# Smoke test to make sure the start_params are well-defined and
# lead to a well-defined model
self.model.filter(self.model.start_params)
# Similarly a smoke test for param_names
assert_equal(len(self.model.start_params), len(self.model.param_names))
# Finally make sure the transform and untransform do their job
actual = self.model.transform_params(self.model.untransform_params(self.model.start_params))
assert_allclose(actual, self.model.start_params)
# Also in the case of enforce invertibility and stationarity = False
self.model.enforce_stationarity = False
self.model.enforce_invertibility = False
actual = self.model.transform_params(self.model.untransform_params(self.model.start_params))
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
assert_allclose(actual, self.model.start_params)
def test_results(self):
# Smoke test for creating the summary
self.results.summary()
        # Test coefficient matrix creation (via a different, more direct, method)
if self.model.k_ar > 0:
coefficients = np.array(self.results.params[self.model._params_ar]).reshape(self.model.k_endog, self.model.k_endog * self.model.k_ar)
coefficient_matrices = np.array([
coefficients[:self.model.k_endog, i*self.model.k_endog:(i+1)*self.model.k_endog]
for i in range(self.model.k_ar)
])
assert_equal(self.results.coefficient_matrices_var, coefficient_matrices)
else:
assert_equal(self.results.coefficient_matrices_var, None)
if self.model.k_ma > 0:
coefficients = np.array(self.results.params[self.model._params_ma]).reshape(self.model.k_endog, self.model.k_endog * self.model.k_ma)
coefficient_matrices = np.array([
coefficients[:self.model.k_endog, i*self.model.k_endog:(i+1)*self.model.k_endog]
for i in range(self.model.k_ma)
])
assert_equal(self.results.coefficient_matrices_vma, coefficient_matrices)
else:
assert_equal(self.results.coefficient_matrices_vma, None)
def test_loglike(self):
assert_allclose(self.results.llf, self.true['loglike'], rtol=1e-6)
def test_bse_oim(self):
assert_allclose(self.results.bse**2, self.true['var_oim'], atol=1e-2)
def test_aic(self):
# We only get 3 digits from Stata
assert_allclose(self.results.aic, self.true['aic'], atol=3)
def test_bic(self):
# We only get 3 digits from Stata
assert_allclose(self.results.bic, self.true['bic'], atol=3)
def test_predict(self, end, atol=1e-6, **kwargs):
# Tests predict + forecast
assert_allclose(
self.results.predict(end=end, **kwargs),
self.true['predict'],
atol=atol)
def test_dynamic_predict(self, end, dynamic, atol=1e-6, **kwargs):
# Tests predict + dynamic predict + forecast
assert_allclose(
self.results.predict(end=end, dynamic=dynamic, **kwargs),
self.true['dynamic_predict'],
atol=atol)
class CheckLutkepohl(CheckVARMAX):
def __init__(self, true, order, trend, error_cov_type, cov_type='oim',
included_vars=['dln_inv', 'dln_inc', 'dln_consump'],
**kwargs):
self.true = true
# 1960:Q1 - 1982:Q4
dta = pd.DataFrame(
results_varmax.lutkepohl_data, columns=['inv', 'inc', 'consump'],
index=pd.date_range('1960-01-01', '1982-10-01', freq='QS'))
dta['dln_inv'] = np.log(dta['inv']).diff()
dta['dln_inc'] = np.log(dta['inc']).diff()
dta['dln_consump'] = np.log(dta['consump']).diff()
endog = dta.ix['1960-04-01':'1978-10-01', included_vars]
self.model = varmax.VARMAX(endog, order=order, trend=trend,
error_cov_type=error_cov_type, **kwargs)
self.results = self.model.filter(true['params'], cov_type=cov_type)
def test_predict(self, **kwargs):
super(CheckLutkepohl, self).test_predict(end='1982-10-01', **kwargs)
def test_dynamic_predict(self, **kwargs):
super(CheckLutkepohl, self).test_dynamic_predict(end='1982-10-01', dynamic='1961-01-01', **kwargs)
class TestVAR(CheckLutkepohl):
def __init__(self):
true = results_varmax.lutkepohl_var1.copy()
true['predict'] = var_results.ix[1:, ['predict_1', 'predict_2', 'predict_3']]
true['dynamic_predict'] = var_results.ix[1:, ['dyn_predict_1', 'dyn_predict_2', 'dyn_predict_3']]
super(TestVAR, self).__init__(
true, order=(1,0), trend='nc',
error_cov_type="unstructured")
def test_bse_oim(self):
# TODO bse test failures
pass
def test_summary(self):
summary = self.results.summary()
tables = [str(table) for table in summary.tables]
params = self.true['params']
# Check the model overview table
assert_equal(re.search(r'Model:.*VAR\(1\)', tables[0]) is None, False)
# For each endogenous variable, check the output
for i in range(self.model.k_endog):
offset = i * self.model.k_endog
table = tables[i+2]
# -> Make sure we have the right table / table name
name = self.model.endog_names[i]
assert_equal(re.search('Results for equation %s' % name, table) is None, False)
# -> Make sure it's the right size
assert_equal(len(table.split('\n')), 8)
# -> Check that we have the right coefficients
assert_equal(re.search('L1.dln_inv +%.4f' % params[offset + 0], table) is None, False)
assert_equal(re.search('L1.dln_inc +%.4f' % params[offset + 1], table) is None, False)
assert_equal(re.search('L1.dln_consump +%.4f' % params[offset + 2], table) is None, False)
# Test the error covariance matrix table
table = tables[-1]
assert_equal(re.search('Error covariance matrix', table) is None, False)
assert_equal(len(table.split('\n')), 11)
params = params[self.model._params_state_cov]
names = self.model.param_names[self.model._params_state_cov]
for i in range(len(names)):
assert_equal(re.search('%s +%.4f' % (names[i], params[i]), table) is None, False)
class TestVAR_diagonal(CheckLutkepohl):
def __init__(self):
true = results_varmax.lutkepohl_var1_diag.copy()
true['predict'] = var_results.ix[1:, ['predict_diag1', 'predict_diag2', 'predict_diag3']]
true['dynamic_predict'] = var_results.ix[1:, ['dyn_predict_diag1', 'dyn_predict_diag2', 'dyn_predict_diag3']]
super(TestVAR_diagonal, self).__init__(
true, order=(1,0), trend='nc',
error_cov_type="diagonal")
def test_bse_oim(self):
# TODO bse test failures
pass
def test_summary(self):
summary = self.results.summary()
tables = [str(table) for table in summary.tables]
params = self.true['params']
# Check the model overview table
assert_equal(re.search(r'Model:.*VAR\(1\)', tables[0]) is None, False)
# For each endogenous variable, check the output
for i in range(self.model.k_endog):
offset = i * self.model.k_endog
table = tables[i+2]
# -> Make sure we have the right table / table name
name = self.model.endog_names[i]
assert_equal(re.search('Results for equation %s' % name, table) is None, False)
# -> Make sure it's the right size
assert_equal(len(table.split('\n')), 8)
# -> Check that we have the right coefficients
assert_equal(re.search('L1.dln_inv +%.4f' % params[offset + 0], table) is None, False)
assert_equal(re.search('L1.dln_inc +%.4f' % params[offset + 1], table) is None, False)
assert_equal(re.search('L1.dln_consump +%.4f' % params[offset + 2], table) is None, False)
# Test the error covariance matrix table
table = tables[-1]
assert_equal(re.search('Error covariance matrix', table) is None, False)
assert_equal(len(table.split('\n')), 8)
params = params[self.model._params_state_cov]
names = self.model.param_names[self.model._params_state_cov]
for i in range(len(names)):
assert_equal(re.search('%s +%.4f' % (names[i], params[i]), table) is None, False)
class TestVAR_measurement_error(CheckLutkepohl):
"""
Notes
-----
There does not appear to be a way to get Stata to estimate a VAR with
measurement errors. Thus this test is mostly a smoke test that measurement
    errors are set up correctly: it uses the same params from TestVAR_diagonal
    and sets the measurement error variance params to zero to check that the
loglike and predict are the same.
It also checks that the state-space representation with positive
measurement errors is correct.
"""
def __init__(self):
true = results_varmax.lutkepohl_var1_diag_meas.copy()
true['predict'] = var_results.ix[1:, ['predict_diag1', 'predict_diag2', 'predict_diag3']]
true['dynamic_predict'] = var_results.ix[1:, ['dyn_predict_diag1', 'dyn_predict_diag2', 'dyn_predict_diag3']]
super(TestVAR_measurement_error, self).__init__(
true, order=(1,0), trend='nc',
error_cov_type="diagonal", measurement_error=True)
# Create another filter results with positive measurement errors
self.true_measurement_error_variances = [1., 2., 3.]
params = np.r_[true['params'][:-3], self.true_measurement_error_variances]
self.results2 = self.model.filter(params)
def test_mle(self):
        # With the additional measurement error parameters, this wouldn't be
# a meaningful test
pass
def test_bse_oim(self):
# This would just test the same thing as TestVAR_diagonal.test_bse_oim
pass
def test_aic(self):
# Since the measurement error is added, the number
# of parameters, and hence the aic and bic, will be off
pass
def test_bic(self):
# Since the measurement error is added, the number
# of parameters, and hence the aic and bic, will be off
pass
def test_representation(self):
# Test that the state space representation in the measurement error
# case is correct
for name in self.model.ssm.shapes.keys():
if name == 'obs':
pass
elif name == 'obs_cov':
actual = self.results2.filter_results.obs_cov
desired = np.diag(self.true_measurement_error_variances)[:,:,np.newaxis]
assert_equal(actual, desired)
else:
assert_equal(getattr(self.results2.filter_results, name),
getattr(self.results.filter_results, name))
def test_summary(self):
summary = self.results.summary()
tables = [str(table) for table in summary.tables]
params = self.true['params']
# Check the model overview table
assert_equal(re.search(r'Model:.*VAR\(1\)', tables[0]) is None, False)
# For each endogenous variable, check the output
for i in range(self.model.k_endog):
offset = i * self.model.k_endog
table = tables[i+2]
# -> Make sure we have the right table / table name
name = self.model.endog_names[i]
assert_equal(re.search('Results for equation %s' % name, table) is None, False)
# -> Make sure it's the right size
assert_equal(len(table.split('\n')), 9)
# -> Check that we have the right coefficients
assert_equal(re.search('L1.dln_inv +%.4f' % params[offset + 0], table) is None, False)
assert_equal(re.search('L1.dln_inc +%.4f' % params[offset + 1], table) is None, False)
assert_equal(re.search('L1.dln_consump +%.4f' % params[offset + 2], table) is None, False)
assert_equal(re.search('measurement_variance +%.4g' % params[-(i+1)], table) is None, False)
# Test the error covariance matrix table
table = tables[-1]
assert_equal(re.search('Error covariance matrix', table) is None, False)
assert_equal(len(table.split('\n')), 8)
params = params[self.model._params_state_cov]
names = self.model.param_names[self.model._params_state_cov]
for i in range(len(names)):
assert_equal(re.search('%s +%.4f' % (names[i], params[i]), table) is None, False)
class TestVAR_obs_intercept(CheckLutkepohl):
def __init__(self):
true = results_varmax.lutkepohl_var1_obs_intercept.copy()
true['predict'] = var_results.ix[1:, ['predict_int1', 'predict_int2', 'predict_int3']]
true['dynamic_predict'] = var_results.ix[1:, ['dyn_predict_int1', 'dyn_predict_int2', 'dyn_predict_int3']]
super(TestVAR_obs_intercept, self).__init__(
true, order=(1,0), trend='nc',
error_cov_type="diagonal", obs_intercept=true['obs_intercept'])
def test_aic(self):
        # Since the obs_intercept is added in an ad-hoc way here, the number
# of parameters, and hence the aic and bic, will be off
pass
def test_bic(self):
        # Since the obs_intercept is added in an ad-hoc way here, the number
# of parameters, and hence the aic and bic, will be off
pass
def test_bse_oim(self):
# TODO bse test failures
pass
class TestVAR_exog(CheckLutkepohl):
# Note: unlike the other tests in this file, this is against the Stata
# var function rather than the Stata dfactor function
def __init__(self):
true = results_varmax.lutkepohl_var1_exog.copy()
true['predict'] = var_results.ix[1:75, ['predict_exog1_1', 'predict_exog1_2', 'predict_exog1_3']]
true['predict'].iloc[0, :] = 0
true['fcast'] = var_results.ix[76:, ['fcast_exog1_dln_inv', 'fcast_exog1_dln_inc', 'fcast_exog1_dln_consump']]
exog = np.arange(75) + 3
super(TestVAR_exog, self).__init__(
true, order=(1,0), trend='nc', error_cov_type='unstructured',
exog=exog, initialization='approximate_diffuse', loglikelihood_burn=1)
def test_mle(self):
pass
def test_aic(self):
# Stata's var calculates AIC differently
pass
def test_bic(self):
# Stata's var calculates BIC differently
pass
def test_bse_oim(self):
# Exclude the covariance cholesky terms
assert_allclose(
self.results.bse[:-6]**2, self.true['var_oim'], atol=1e-2)
def test_predict(self):
super(CheckLutkepohl, self).test_predict(end='1978-10-01', atol=1e-3)
def test_dynamic_predict(self):
# Stata's var cannot subsequently use dynamic
pass
def test_forecast(self):
# Tests forecast
exog = (np.arange(75, 75+16) + 3)[:, np.newaxis]
# Test it through the results class wrapper
desired = self.results.forecast(steps=16, exog=exog)
assert_allclose(desired, self.true['fcast'], atol=1e-6)
# Test it directly
beta = self.results.params[-9:-6]
state_intercept = np.concatenate([exog*beta[0], exog*beta[1], exog*beta[2]], axis=1).T
desired = super(varmax.VARMAXResultsWrapper, self.results).predict(start=75, end=75+15, state_intercept=state_intercept)
assert_allclose(desired, self.true['fcast'], atol=1e-6)
def test_summary(self):
summary = self.results.summary()
tables = [str(table) for table in summary.tables]
params = self.true['params']
# Check the model overview table
assert_equal(re.search(r'Model:.*VARX\(1\)', tables[0]) is None, False)
# For each endogenous variable, check the output
for i in range(self.model.k_endog):
offset = i * self.model.k_endog
table = tables[i+2]
# -> Make sure we have the right table / table name
name = self.model.endog_names[i]
assert_equal(re.search('Results for equation %s' % name, table) is None, False)
# -> Make sure it's the right size
assert_equal(len(table.split('\n')), 9)
# -> Check that we have the right coefficients
assert_equal(re.search('L1.dln_inv +%.4f' % params[offset + 0], table) is None, False)
assert_equal(re.search('L1.dln_inc +%.4f' % params[offset + 1], table) is None, False)
assert_equal(re.search('L1.dln_consump +%.4f' % params[offset + 2], table) is None, False)
assert_equal(re.search('beta.x1 +' + forg(params[self.model._params_regression][i], prec=4), table) is None, False)
# Test the error covariance matrix table
table = tables[-1]
assert_equal(re.search('Error covariance matrix', table) is None, False)
assert_equal(len(table.split('\n')), 11)
params = params[self.model._params_state_cov]
names = self.model.param_names[self.model._params_state_cov]
for i in range(len(names)):
assert_equal(re.search('%s +%.4f' % (names[i], params[i]), table) is None, False)
class TestVAR_exog2(CheckLutkepohl):
# This is a regression test, to make sure that the setup with multiple exog
# works correctly. The params are from Stata, but the loglike is from
# this model. Likely the small discrepancy (see the results file) is from
# the approximate diffuse initialization.
def __init__(self):
true = results_varmax.lutkepohl_var1_exog2.copy()
true['predict'] = var_results.ix[1:75, ['predict_exog2_1', 'predict_exog2_2', 'predict_exog2_3']]
true['predict'].iloc[0, :] = 0
true['fcast'] = var_results.ix[76:, ['fcast_exog2_dln_inv', 'fcast_exog2_dln_inc', 'fcast_exog2_dln_consump']]
exog = np.c_[np.ones((75,1)), (np.arange(75) + 3)[:, np.newaxis]]
super(TestVAR_exog2, self).__init__(
true, order=(1,0), trend='nc', error_cov_type='unstructured',
exog=exog, initialization='approximate_diffuse', loglikelihood_burn=1)
def test_mle(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_bse_oim(self):
pass
def test_predict(self):
super(CheckLutkepohl, self).test_predict(end='1978-10-01', atol=1e-3)
def test_dynamic_predict(self):
# Stata's var cannot subsequently use dynamic
pass
def test_forecast(self):
# Tests forecast
exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 3)[:, np.newaxis]]
desired = self.results.forecast(steps=16, exog=exog)
assert_allclose(desired, self.true['fcast'], atol=1e-6)
class TestVAR2(CheckLutkepohl):
def __init__(self):
true = results_varmax.lutkepohl_var2.copy()
true['predict'] = var_results.ix[1:, ['predict_var2_1', 'predict_var2_2']]
true['dynamic_predict'] = var_results.ix[1:, ['dyn_predict_var2_1', 'dyn_predict_var2_2']]
super(TestVAR2, self).__init__(
true, order=(2,0), trend='nc', error_cov_type='unstructured',
included_vars=['dln_inv', 'dln_inc'])
def test_bse_oim(self):
# Exclude the covariance cholesky terms
# assert_allclose(
# self.results.bse[:-3]**2, self.true['var_oim'][:-3], atol=1e-2
pass
def test_summary(self):
summary = self.results.summary()
tables = [str(table) for table in summary.tables]
params = self.true['params']
# Check the model overview table
assert_equal(re.search(r'Model:.*VAR\(2\)', tables[0]) is None, False)
# For each endogenous variable, check the output
for i in range(self.model.k_endog):
offset = i * self.model.k_endog * self.model.k_ar
table = tables[i+2]
# -> Make sure we have the right table / table name
name = self.model.endog_names[i]
assert_equal(re.search('Results for equation %s' % name, table) is None, False)
# -> Make sure it's the right size
assert_equal(len(table.split('\n')), 9)
# -> Check that we have the right coefficients
assert_equal(re.search('L1.dln_inv +%.4f' % params[offset + 0], table) is None, False)
assert_equal(re.search('L1.dln_inc +%.4f' % params[offset + 1], table) is None, False)
assert_equal(re.search('L2.dln_inv +%.4f' % params[offset + 2], table) is None, False)
assert_equal(re.search('L2.dln_inc +%.4f' % params[offset + 3], table) is None, False)
# Test the error covariance matrix table
table = tables[-1]
assert_equal(re.search('Error covariance matrix', table) is None, False)
assert_equal(len(table.split('\n')), 8)
params = params[self.model._params_state_cov]
names = self.model.param_names[self.model._params_state_cov]
for i in range(len(names)):
assert_equal(re.search('%s +%.4f' % (names[i], params[i]), table) is None, False)
class CheckFREDManufacturing(CheckVARMAX):
def __init__(self, true, order, trend, error_cov_type, cov_type='oim',
**kwargs):
self.true = true
# 1960:Q1 - 1982:Q4
dta = webuse('manufac', 'http://www.stata-press.com/data/r12/')
dta.index = dta.month
dta['dlncaputil'] = dta['lncaputil'].diff()
dta['dlnhours'] = dta['lnhours'].diff()
endog = dta.ix['1972-02-01':, ['dlncaputil', 'dlnhours']]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.model = varmax.VARMAX(endog, order=order, trend=trend,
error_cov_type=error_cov_type, **kwargs)
self.results = self.model.filter(true['params'], cov_type=cov_type)
class TestVARMA(CheckFREDManufacturing):
"""
Test against the sspace VARMA example with some params set to zeros.
"""
def __init__(self):
true = results_varmax.fred_varma11.copy()
true['predict'] = varmax_results.ix[1:, ['predict_varma11_1', 'predict_varma11_2']]
true['dynamic_predict'] = varmax_results.ix[1:, ['dyn_predict_varma11_1', 'dyn_predict_varma11_2']]
super(TestVARMA, self).__init__(
true, order=(1,1), trend='nc', error_cov_type='diagonal')
def test_mle(self):
# Since the VARMA model here is generic (we're just forcing zeros
# in some params) whereas Stata's is restricted, the MLE test isn't
        # meaningful
pass
def test_bse_oim(self):
# Standard errors do not match Stata's
raise SkipTest('Known failure: standard errors do not match.')
def test_aic(self):
# Since the VARMA model here is generic (we're just putting in zeros
# for some params), Stata assumes a different estimated number of
        # parameters; hence the aic and bic will be off
pass
def test_bic(self):
# Since the VARMA model here is generic (we're just putting in zeros
# for some params), Stata assumes a different estimated number of
        # parameters; hence the aic and bic will be off
pass
def test_predict(self):
super(TestVARMA, self).test_predict(end='2009-05-01', atol=1e-4)
def test_dynamic_predict(self):
super(TestVARMA, self).test_dynamic_predict(end='2009-05-01', dynamic='2000-01-01')
def test_summary(self):
summary = self.results.summary()
tables = [str(table) for table in summary.tables]
params = self.true['params']
# Check the model overview table
assert_equal(re.search(r'Model:.*VARMA\(1,1\)', tables[0]) is None, False)
# For each endogenous variable, check the output
for i in range(self.model.k_endog):
offset_ar = i * self.model.k_endog
offset_ma = self.model.k_endog**2 * self.model.k_ar + i * self.model.k_endog
table = tables[i+2]
# -> Make sure we have the right table / table name
name = self.model.endog_names[i]
assert_equal(re.search('Results for equation %s' % name, table) is None, False)
# -> Make sure it's the right size
assert_equal(len(table.split('\n')), 9)
# -> Check that we have the right coefficients
assert_equal(re.search('L1.dlncaputil +' + forg(params[offset_ar + 0], prec=4), table) is None, False)
assert_equal(re.search('L1.dlnhours +' + forg(params[offset_ar + 1], prec=4), table) is None, False)
assert_equal(re.search(r'L1.e\(dlncaputil\) +' + forg(params[offset_ma + 0], prec=4), table) is None, False)
assert_equal(re.search(r'L1.e\(dlnhours\) +' + forg(params[offset_ma + 1], prec=4), table) is None, False)
# Test the error covariance matrix table
table = tables[-1]
assert_equal(re.search('Error covariance matrix', table) is None, False)
assert_equal(len(table.split('\n')), 7)
params = params[self.model._params_state_cov]
names = self.model.param_names[self.model._params_state_cov]
for i in range(len(names)):
assert_equal(re.search('%s +%s' % (names[i], forg(params[i], prec=4)), table) is None, False)
class TestVMA1(CheckFREDManufacturing):
"""
Test against the sspace VARMA example with some params set to zeros.
"""
def __init__(self):
true = results_varmax.fred_vma1.copy()
true['predict'] = varmax_results.ix[1:, ['predict_vma1_1', 'predict_vma1_2']]
true['dynamic_predict'] = varmax_results.ix[1:, ['dyn_predict_vma1_1', 'dyn_predict_vma1_2']]
super(TestVMA1, self).__init__(
true, order=(0,1), trend='nc', error_cov_type='diagonal')
def test_mle(self):
# Since the VARMA model here is generic (we're just forcing zeros
# in some params) whereas Stata's is restricted, the MLE test isn't
        # meaningful
pass
def test_bse_oim(self):
# Standard errors do not match Stata's
raise SkipTest('Known failure: standard errors do not match.')
def test_aic(self):
# Since the VARMA model here is generic (we're just putting in zeros
# for some params), Stata assumes a different estimated number of
        # parameters; hence the aic and bic will be off
pass
def test_bic(self):
# Since the VARMA model here is generic (we're just putting in zeros
# for some params), Stata assumes a different estimated number of
        # parameters; hence the aic and bic will be off
pass
def test_predict(self):
super(TestVMA1, self).test_predict(end='2009-05-01', atol=1e-4)
def test_dynamic_predict(self):
super(TestVMA1, self).test_dynamic_predict(end='2009-05-01', dynamic='2000-01-01')
def test_specifications():
# Tests for model specification and state space creation
endog = np.arange(20).reshape(10,2)
exog = np.arange(10)
exog2 = pd.Series(exog, index=pd.date_range('2000-01-01', '2009-01-01', freq='AS'))
# Test successful model creation
mod = varmax.VARMAX(endog, exog=exog, order=(1,0))
# Test successful model creation with pandas exog
mod = varmax.VARMAX(endog, exog=exog2, order=(1,0))
def test_misspecifications():
# Tests for model specification and misspecification exceptions
endog = np.arange(20).reshape(10,2)
# Bad trend specification
assert_raises(ValueError, varmax.VARMAX, endog, order=(1,0), trend='')
# Bad error_cov_type specification
assert_raises(ValueError, varmax.VARMAX, endog, order=(1,0), error_cov_type='')
# Bad order specification
assert_raises(ValueError, varmax.VARMAX, endog, order=(0,0))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
varmax.VARMAX(endog, order=(1,1))
# Warning with VARMA specification
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
varmax.VARMAX(endog, order=(1,1))
print(w)
message = ('Estimation of VARMA(p,q) models is not generically robust,'
' due especially to identification issues.')
assert_equal(str(w[0].message), message)
warnings.resetwarnings()
| bsd-2-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
HeraclesHX/scikit-learn | sklearn/tests/test_lda.py | 77 | 6258 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = lda._cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = lda._cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
dbjohnson/flhackday | categorical.py | 1 | 1439 | from matplotlib import colors as clr
import pylab as plt
import seaborn as sns
import numpy as np
background = -1<<30
def heatmap(image, ncolors=None, transform=True):
distinct_values = list(sorted(np.unique(image)))
if background in distinct_values:
distinct_values.remove(background)
if ncolors is None:
ncolors = len(distinct_values)
colors = sns.color_palette("hls", ncolors)
background_color = [0.95, 0.95, 0.95]
cmap_colors = colors[:]
cmap_colors.insert(0, background_color)
cmap = clr.ListedColormap(cmap_colors)
display_im = []
if transform:
for row in image:
display_im.append([background_color if val == background
else colors[distinct_values.index(val) % len(colors)] for val in row])
else:
for row in image:
display_im.append([background_color if val == background
else colors[val % len(colors)] for val in row])
rows, cols = image.shape
plt.imshow(display_im, interpolation='none', aspect='auto', extent=[0, cols, 0, rows], cmap=cmap)
plt.colorbar(ticks=[])
plt.grid(ls='solid', color='w')
plt.xticks(range(cols), [])
plt.yticks(range(rows), [])
return colors
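# Example call (hypothetical label image; cells equal to `background` are shown
# in light grey and excluded from the colour mapping):
#   labels = np.random.randint(0, 5, (20, 30))
#   heatmap(labels)
#   plt.show()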
def get_unique_values_and_index(values):
distinct_values = list(sorted(np.unique(values)))
return distinct_values, [distinct_values.index(v) for v in values] | mit |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
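# A rough usage sketch (hypothetical `Series` objects s1 and s2 built elsewhere):
#   df = DataFrame()
#   df.assign(feature=s1, label=s2)          # add/replace columns
#   subset = df.select_columns(["feature"])  # new DataFrame with a column subset
#   tensors = df.build()                     # dict of column name -> built output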
| apache-2.0 |
zooniverse/aggregation | experimental/consensus/paramSearch.py | 2 | 5713 | #!/usr/bin/env python
import os
import csv
import sys
import numpy as np
import math
if os.path.isdir("/Users/greghines/Databases/serengeti"):
baseDir = "/Users/greghines/Databases/serengeti/"
else:
baseDir = "/home/ggdhines/Databases/serengeti/"
classifications = {}
viewedSubjects = {}
individualConsistency = {}
subjectConsistency = {}
weights = {}
reader = csv.reader(open(baseDir+"goldFiltered.csv","rU"), delimiter=",")
next(reader, None)
for line in reader:
subject_zooniverse_id = line[2]
user_name = line[1]
attribute = line[11]
if not(subject_zooniverse_id in classifications):
classifications[subject_zooniverse_id] = {}
subjectConsistency[subject_zooniverse_id] = {}
if not(user_name in classifications[subject_zooniverse_id]):
if attribute is not "":
classifications[subject_zooniverse_id][user_name] = [attribute]
else:
classifications[subject_zooniverse_id][user_name] = []
subjectConsistency[subject_zooniverse_id][user_name] = 1
else:
if attribute is not "":
classifications[subject_zooniverse_id][user_name].append(attribute)
if not(user_name in viewedSubjects):
viewedSubjects[user_name] = []
individualConsistency[user_name] = 1.
weights[user_name] = 1.
if not(subject_zooniverse_id in viewedSubjects[user_name]):
viewedSubjects[user_name].append(subject_zooniverse_id)
# numViewed = []
# for subject_zooniverse_id in classifications:
# differentClassifications = []
#
# #sum up the count for each classification
# for user_name in classifications[subject_zooniverse_id]:
# c = tuple(sorted(classifications[subject_zooniverse_id][user_name]))
# if not(c in differentClassifications):
# differentClassifications.append(c)
#
# numViewed.append(len(differentClassifications))
#
# numClassifications = 0
# for user_name in viewedSubjects:
# numClassifications += len(viewedSubjects[user_name])
#
# print numClassifications
#
# print np.mean(numViewed)
# print np.median(numViewed)
# xVal = range(1,max(numViewed))
# yVal = [len([i for i in numViewed if i == x])/float(len(numViewed)) for x in xVal]
# import matplotlib.pyplot as plt
# plt.bar([x-0.5 for x in xVal],yVal)
# plt.show()
# assert False
import pylab as P
#wParam1 = 0.6
#wParam2 = 8.5
param1Range = np.arange(0.834,0.838,0.0005)
param2Range = np.arange(19.2,19.41,0.025)
meshX,meshY = np.meshgrid(param1Range,param2Range)
mesh = np.zeros((len(param1Range), len(param2Range)))
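# For each (wParam1, wParam2) pair, mesh will hold the two-sample KS statistic between the
# user-weight distributions from two successive iterations - a rough measure of how much the
# weights are still changing for that parameter choice.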
for i1,wParam1 in enumerate(param1Range):
for i2, wParam2 in enumerate(param2Range):
print (wParam1,wParam2)
results = []
for iterCount in range(2):
#first time through
for subject_zooniverse_id in classifications:
classificationCount = {}
totalClassifications = 0.
uniqueClassifications = 0.
#sum up the count for each classification
for user_name in classifications[subject_zooniverse_id]:
totalClassifications += 1
c = tuple(sorted(classifications[subject_zooniverse_id][user_name]))
w = weights[user_name]
if not(c in classificationCount):
classificationCount[c] = w
uniqueClassifications += 1
else:
classificationCount[c] += w
classificationPercentage = {c: classificationCount[c]/totalClassifications for c in classificationCount}
#now calculate the consistency values
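                # Consistency of a classification c is its (weighted) vote share plus the summed
                # disagreement with every other classification, normalised by the number of unique
                # classifications: (P(c) + sum_{c' != c} (1 - P(c'))) / uniqueClassifications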
for c in classificationCount:
subjectConsistency[subject_zooniverse_id][c] = (classificationPercentage[c] + sum([1-classificationPercentage[cPrime] for cPrime in classificationCount if (c != cPrime)]))/uniqueClassifications
if subjectConsistency[subject_zooniverse_id][c] < 0:
print classificationPercentage
assert(subjectConsistency[subject_zooniverse_id][c] >= 0)
#calculate the user average
for user_name in viewedSubjects:
totalConsistency = 0
for subject_zooniverse_id in viewedSubjects[user_name]:
c = tuple(sorted(classifications[subject_zooniverse_id][user_name]))
totalConsistency += subjectConsistency[subject_zooniverse_id][c]
assert(totalConsistency >= 0)
individualConsistency[user_name] = totalConsistency/float(len(viewedSubjects[user_name]))
#map the consistency values into weights
#weights = []
# for user_name in individualConsistency:
# try:
# weights.append(min(1., math.pow(individualConsistency[user_name]/0.6,8.5)))
# except ValueError:
# print individualConsistency[user_name]
# raise
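            # Map each user's consistency onto a weight in (0, 1]: consistencies at or above
            # wParam1 get full weight 1, while lower consistencies are down-weighted by the
            # power wParam2 (these are the two parameters being grid-searched here).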
weights = {user_name: min(1., math.pow(individualConsistency[user_name]/wParam1,wParam2)) for user_name in individualConsistency}
#results.append([individualConsistency[user_name] for user_name in individualConsistency])
results.append([weights[user_name] for user_name in weights])
#print len(individualConsistency)
from scipy.stats import ks_2samp
diff = ks_2samp(results[0], results[1])
mesh[i1][i2] = diff[0]
print mesh
import pylab as pl
pl.contourf(param2Range,param1Range,mesh)
#P.hist(results[0], 50, normed=1, histtype='step', cumulative=True)
#P.show()
pl.colorbar()
pl.show() | apache-2.0 |
Kate-Willett/HadISDH_Marine_Build | EUSTACE_SST_MAT/Combined_Uncertainty_Grids.py | 1 | 38857 | # python 3
#
# Author: Kate Willett
# Created: 18 January 2019
# Last update: 19 January 2019
# Location: /data/local/hadkw/HADCRUH2/MARINE/EUSTACEMDS/EUSTACE_SST_MAT/
# GitHub: https://github.com/Kate-Willett/HadISDH_Marine_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code creates the combined obs uncertainty, gridbox sampling uncertainty
# and full (sampling + obs) uncertainty for each gridbox
#
# Uncertainties are assessed as 1 sigma and output as 1 sigma!!!!
#
# NOTE THAT IT ALSO REFORMATS THE DATA TO BE ONE FILE PER VARIABLE WITH ALL RELATED FIELDS WITHIN!!!
#
# The sampling uncertainty follows the methodology applied for HadISDH-land which
# in turn follows Jones et al., 1999
#
# Willett, K. M., Williams Jr., C. N., Dunn, R. J. H., Thorne, P. W., Bell,
# S., de Podesta, M., Jones, P. D., and Parker D. E., 2013: HadISDH: An
# updated land surface specific humidity product for climate monitoring.
# Climate of the Past, 9, 657-677, doi:10.5194/cp-9-657-2013.
#
# Jones, P. D., Osborn, T. J., and Briffa, K. R.: Estimating sampling errors in large-scale temperature averages, J. Climate, 10, 2548-2568, 1997
#
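# As a hedged reminder (paraphrased from memory, not quoted from the papers above): the
# Jones et al. (1997) gridbox sampling error is estimated approximately as
#    SE^2 = sbar^2 * rbar * (1 - rbar) / (1 + (n - 1) * rbar)
# where sbar^2 is the mean station variance in the gridbox, rbar is the mean inter-site
# correlation and n is the number of (pseudo) stations - hence the usbarSQ and urbar
# fields written out alongside uSAMP below.
#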
# -----------------------
# LIST OF MODULES
# -----------------------
# import os
# import datetime as dt
# import numpy as np
# import sys, getopt
# import math
# from math import sin, cos, sqrt, atan2, radians
# import scipy.stats
# import matplotlib.pyplot as plt
# matplotlib.use('Agg')
# import calendar
# import gc
# import netCDF4 as ncdf
# import copy
# import pdb
#
# Kate:
# from ReadNetCDF import GetGrid
# from ReadNetCDF import GetGrid4
# import gridbox_sampling_uncertainty as gsu
#
# INTERNAL:
#
#
# -----------------------
# DATA
# -----------------------
# Land Sea mask of 5x5 grids
# /project/hadobs2/hadisdh/marine/otherdata/new_coverpercentjul08.nc
#
# Bias Corrected actual and renormalised anomalies
# actual values:
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_5x5_monthly_from_daily_*_relax.nc
# anomaly values:
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_5x5_monthly_renorm19812010_anomalies_from_daily_*_relax.nc
# uncertainty:
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_u*_5x5_monthly_from_daily_*_relax.nc
#
# Bias Corrected SHIP only actual and renormalised anomalies
# actual values:
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_5x5_monthly_from_daily_*_relax.nc
# anomaly values:
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_5x5_monthly_renorm19812010_anomalies_from_daily_*_relax.nc
# uncertainty:
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_u*_5x5_monthly_from_daily_*_relax.nc
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Annoyingly it looks like hadobs user can't access the module load scitools for some reason so I have to run from hadkw.
# This means some faffing around with saving files as hadkw then copying them over to project/hadobs2/ as hadobs
# Maybe after a test run I can run this on spice which should allow python 3 use?
#
# module load scitools/experimental-current
# python Combined_Uncertainty_Grids.py --year1 1973 --year2 2017 --month1 01 --month2 12 --timings both (day, night) --platform ship (all)
# Not written this yet - prefer it to be a separate file
# --Reformat flag outputs the data by variable with all related fields in it. We may want this as stand alone but I'm thinking it's too big to run all in one go.
#
# -----------------------
# OUTPUT
# -----------------------
# 1 sigma uncertainty!!!
#
# Files are temporarily saved to /TMPSAVE/ within the run directory to be copied later
#
# uOBS, uSAMP (usbarSQ, urbar), uFULL
# Bias Corrected
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_uOBS_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_uSAMP_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_usbarSQ_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_urbar_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/OBSclim2BClocal_uFULL_5x5_monthly_from_daily_*_relax.nc
#
# Bias Corrected SHIP only
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_uOBS_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_uSAMP_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_usbarSQ_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_urbar_5x5_monthly_from_daily_*_relax.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/OBSclim2BClocal_uFULL_5x5_monthly_from_daily_*_relax.nc
#
# VARIABLE BASED FIELDS ????
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/MONITORING/HadISDH.marineq.1.0.0.2017f_BCship5by5_anoms8110_<nowmon><nowyear>_cf.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/MONITORING/HadISDH.marineRH.1.0.0.2017f_BCship5by5_anoms8110_<nowmon><nowyear>_cf.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/MONITORING/HadISDH.marinee.1.0.0.2017f_BCship5by5_anoms8110_<nowmon><nowyear>_cf.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/MONITORING/HadISDH.marineT.1.0.0.2017f_BCship5by5_anoms8110_<nowmon><nowyear>_cf.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/MONITORING/HadISDH.marineTd.1.0.0.2017f_BCship5by5_anoms8110_<nowmon><nowyear>_cf.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/MONITORING/HadISDH.marineTw.1.0.0.2017f_BCship5by5_anoms8110_<nowmon><nowyear>_cf.nc
# /project/hadobs2/hadisdh/marine/ICOADS.3.0.0/MONITORING/HadISDH.marineDPD.1.0.0.2017f_BCship5by5_anoms8110_<nowmon><nowyear>_cf.nc
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (18 January 2021)
# ---------
#
# Enhancements
#
# Changes
# Now passes start and end years and start and end climatology years to gridbox_sampling_uncertainty.py
#
# Bug fixes
#
#
# Version 1 (18 January 2018)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
# This builds on original IDL code written for HadISDH-land by Kate Willett calp_samplingerrorJUL2012_nofill.pro
#
################################################################################################################
# IMPORTS:
import os
import datetime as dt
import numpy as np
import sys, getopt
import math
from math import sin, cos, sqrt, atan2, radians
import scipy.stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import calendar
import gc
import netCDF4 as ncdf
import copy
import pdb
# Kate:
from ReadNetCDF import GetGrid
from ReadNetCDF import GetGrid4
import gridbox_sampling_uncertainty as gsu
################################################################################################################
# SUBROUTINES #
##################################################
# get_pseudo_stations #
##################################################
def get_pseudo_stations(NGridsArr,Nlats,Nlons,TheMDI):
'''
NGridsArr: times, lats, lons, of number of grids of 1x1 dailies going into each month
Nlats: number of latitude boxes
Nlons: number of longitude boxes
TheMDI: missing data indicator
    # Work out mean n_stations (pseudo) per gridbox (month and over whole period) to pass to sampling uncertainty
# This is tricky - n_obs and n_grids could be far larger than n_stations within a 5by5 gridbox.
# I could use the number of potential 1by1 grids in a 5by5 - so 25 - this could be an over estimate in some cases
# I could just use the n_grids but this is the number of 1by1 daily grids going into the monthly - 25*30 = 750 maximum
# Using n_obs is harder to guage, especially in the buoy ERA
# Maybe for well sampled gridboxes (600+ daily 1by1 grids within a month) I use 25 and scale appropriately depending on n_grids
# Yes - sounds like a plan
# I'm generally getting fewer than 100 grids per month
# use np.linspace to get 25 steps from 1 to 600 such that the largest bin has 25 pseudo n_stations and the smallest has 1
# Make sure that grids with NO DATA have 0 '''
Pseudo_station_bins = np.round(np.linspace(25,600,24)) # integer array from 25 to 600 in 24 steps
Pseudo_station_bins = np.append(Pseudo_station_bins,6000) # now 25 elements with a huge 6000 at the end to catch all
Pseudo_station_counts = np.arange(1,26) # integer array from 1 to 25
Ntims = len(NGridsArr[:,0,0])
MeanNPointsArr = np.zeros((Nlats,Nlons),dtype = int) # mean number of pseudo stations over the whole sampling period
NPointsArr = np.zeros((Ntims,Nlats,Nlons),dtype = int) # mean number of pseudo stations per month
for lt in range(Nlats):
for ln in range(Nlons):
Gots = np.where(NGridsArr[:,lt,ln] > 0)[0]
if (len(Gots) > 0):
MeanGrids = np.mean(NGridsArr[Gots,lt,ln])
MeanNPointsArr[lt,ln] = Pseudo_station_counts[np.where(Pseudo_station_bins > np.ceil(MeanGrids))[0][0]]
# Also take the same approach for each month individually
for m,Mpt in enumerate(Gots):
MeanGrids = np.mean(NGridsArr[Mpt,lt,ln])
NPointsArr[Mpt,lt,ln] = Pseudo_station_counts[np.where(Pseudo_station_bins > np.ceil(MeanGrids))[0][0]]
#print("Check the pseudo station bit")
# This works ok
#pdb.set_trace()
return MeanNPointsArr, NPointsArr
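# Illustrative mapping implied by the binning above (approximate, since the bin edges come
# from np.linspace(25, 600, 24)): a gridbox averaging fewer than ~25 daily 1x1 grids per
# month counts as 1 pseudo station, ~100 grids as about 5, and anything from ~600 upwards
# as the maximum of 25.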
##################################################
# calc_total_obs_unc #
##################################################
def calc_total_obs_unc(TheURArr,TheUCArr,TheUMArr,TheUSCNArr,TheUHGTArr,TheNLats,TheNLons,TheMDI):
'''Combines all obs uncertainty sources in quadrature for the gridbox
TheURArr: times, lat, lon array of whole number uncertainty with TheMDI missing data
TheUCArr: times, lat, lon array of climatology uncertainty with TheMDI missing data
TheUMArr: times, lat, lon array of measurement uncertainty with TheMDI missing data
TheUSCNArr: times, lat, lon array of non-ventilated instrument adjustment uncertainty with TheMDI missing data
TheUHGTArr: times, lat, lon array of height adjustment uncertainty with TheMDI missing data
TheNLats: scalar of number of gridbox latitude centres from SOUTH to NORTH???
TheNLons: scalar of number of gridbox longitude centres from WEST to EAST???
TheMDI: scalar missing data ID
RETURNS:
TheTotObsUncArr: times, lat, lon array of total obs uncertainty with TheMDI missing data'''
# What is the times total?
TheNTims = len(TheURArr[:,0,0])
# Set up the array to hold the total obs uncertainties
    # Not really necessary but good to see what we are working with
TheTotObsUncArr = np.empty((TheNTims,TheNLats,TheNLons),dtype = float)
TheTotObsUncArr.fill(TheMDI)
# np arrays can have element wise operations so we do not need to loop - hurray!
# set all Uncs MDIs to 0 so that 0**2 won't impact the combination
TheURArr[np.where(TheURArr == TheMDI)] = 0
TheUCArr[np.where(TheUCArr == TheMDI)] = 0
TheUMArr[np.where(TheUMArr == TheMDI)] = 0
TheUSCNArr[np.where(TheUSCNArr == TheMDI)] = 0
TheUHGTArr[np.where(TheUHGTArr == TheMDI)] = 0
# Combine in quadrature
TheTotObsUncArr = np.sqrt(TheURArr**2 + TheUCArr**2 + TheUMArr**2 + TheUHGTArr**2 + TheUSCNArr**2)
# Convert all 0 values to TheMDI - hope floating points don't do anything silly
TheTotObsUncArr[np.where(TheTotObsUncArr == 0.)] = TheMDI
# print("Test TheTotObsUncArr!")
# pdb.set_trace()
return TheTotObsUncArr
##################################################
# calc_full_unc #
##################################################
def calc_full_unc(TheUOBSArr,TheUSAMPArr,TheNLats,TheNLons,TheMDI):
'''Combines all total obs uncertainty and sampling uncertainty sources in quadrature for the gridbox
    TheUOBSArr: times, lat, lon array of total observation uncertainty with TheMDI missing data
    TheUSAMPArr: times, lat, lon array of gridbox sampling uncertainty with TheMDI missing data
TheNLats: scalar of number of gridbox latitude centres from SOUTH to NORTH???
TheNLons: scalar of number of gridbox longitude centres from WEST to EAST???
TheMDI: scalar missing data ID
This is all set up for 1 sigma uncertainty
RETURNS:
    TheFullUncArr: times, lat, lon array of full (total obs + sampling) uncertainty with TheMDI missing data
# What is the times total?
TheNTims = len(TheUOBSArr[:,0,0])
# Set up the array to hold the full uncertainties
    # Not really necessary but good to see what we are working with
TheFullUncArr = np.empty((TheNTims,TheNLats,TheNLons),dtype = float)
TheFullUncArr.fill(TheMDI)
# np arrays can have element wise operations so we do not need to loop - hurray!
# set all Uncs MDIs to 0 so that 0**2 won't impact the combination
TheUOBSArr[np.where(TheUOBSArr == TheMDI)] = 0
TheUSAMPArr[np.where(TheUSAMPArr == TheMDI)] = 0
# Combine in quadrature
TheFullUncArr = np.sqrt(TheUOBSArr**2 + TheUSAMPArr**2)
# Convert all 0 values to TheMDI - hope floating points don't do anything silly
TheFullUncArr[np.where(TheFullUncArr == 0.)] = TheMDI
TheUOBSArr[np.where(TheUOBSArr == 0)] = TheMDI
TheUSAMPArr[np.where(TheUSAMPArr == 0)] = TheMDI
# print("Test TheFullUncArr!")
# pdb.set_trace()
return TheFullUncArr
##################################################
# Write_Netcdf_Variable_Unc #
##################################################
def Write_Netcdf_Variable_Unc(uSource,outfile, var, vlong, vstandard, vunit, unc_data, TheMDI):
'''
This is basically a tweak of the utils.py version but set up to work here
Create the netcdf variable
:param str uSource: name of uncertainty source
:param obj outfile: output file object
:param list var: variable name
:param list vlong: long variable name
:param list vstandard: standard variable name
:param str vunit: unit of variable
:param np array unc_data: times, lats, lons uncertainty data to write
'''
# For sbarSQ adn SAMP there will be an n_grids variable which needs to be treated differently
if (var == 'n_grids'):
# Create the Variable but rbar and sbarSQ do not have a time dimension
if (uSource != 'sbarSQ'):
nc_var = outfile.createVariable(var, np.dtype('float'), ('time','latitude','longitude',), zlib = True, fill_value = -1) # with compression
else:
nc_var = outfile.createVariable(var, np.dtype('float'), ('latitude','longitude',), zlib = True, fill_value = -1) # with compression
nc_var.long_name = 'Number of pseudo stations within gridbox'
nc_var.standard_name = 'Number of pseudo station grids'
nc_var.units = vunit
nc_var.missing_value = -1
        # We're not using masked arrays here - hope that's not a problem
nc_var.valid_min = np.min(unc_data[np.where(unc_data > -1)])
nc_var.valid_max = np.max(unc_data[np.where(unc_data > -1)])
# For all other variables...
else:
# Create the Variable but rbar and sbarSQ do not have a time dimension
if (uSource != 'rbar') & (uSource != 'sbarSQ'):
nc_var = outfile.createVariable(var+'_'+uSource, np.dtype('float64'), ('time','latitude','longitude',), zlib = True, fill_value = TheMDI) # with compression
else:
nc_var = outfile.createVariable(var+'_'+uSource, np.dtype('float64'), ('latitude','longitude',), zlib = True, fill_value = TheMDI) # with compression
if uSource == 'SAMP':
nc_var.long_name = vlong+' GRIDBOX SAMPLING uncertainty (1 sigma)'
nc_var.standard_name = vstandard+' sampling uncertainty'
elif uSource == 'sbarSQ':
nc_var.long_name = vlong+' GRIDBOX mean station variance'
nc_var.standard_name = vstandard+' mean station variance'
elif uSource == 'rbar':
nc_var.long_name = vlong+' GRIDBOX mean intersite correlation'
nc_var.standard_name = vstandard+' mean intersite correlation'
elif uSource == 'OBS':
nc_var.long_name = vlong+' TOTAL OBSERVATION uncertainty (1 sigma)'
nc_var.standard_name = vstandard+' total obs uncertainty'
elif uSource == 'FULL':
nc_var.long_name = vlong+' FULL uncertainty (1 sigma)'
nc_var.standard_name = vstandard+' full uncertainty'
nc_var.units = vunit
nc_var.missing_value = TheMDI
        # We're not using masked arrays here - hope that's not a problem
nc_var.valid_min = np.min(unc_data[np.where(unc_data != TheMDI)])
nc_var.valid_max = np.max(unc_data[np.where(unc_data != TheMDI)])
nc_var[:] = unc_data
# print("Testing netCDF output to find why its putting MDI as 0",var)
# print(unc_data[0,0:10,0:5])
# pdb.set_trace()
return # write_netcdf_variable_unc
###################################################################
# Write_NetCDF_Unc #
###################
def Write_NetCDF_Unc(uSource, filename, data_abs, data_anoms, lats, lons, time, variables_abs, variables_anoms, long_abs, long_anoms, standard_abs, standard_anoms, unitsarr,TheMDI):
'''
This is basically a copy of utils.py version but tweaked to work here
Write the relevant fields out to a netCDF file.
:param str uSource: name of uncertainty source
:param str filename: output filename
:param list of np arrays data_abs: the uncertainty data array for anomalies [times, lats, lons] for each variable
:param list of np arrays data_anoms: the whole uncertainty data array for actuals [times, lats, lons] for each variable
:param array lats: the latitudes
:param array lons: the longitudes
:param array time: the times as TimeVar object
:param list variables_abs: the actual variables in order to output
:param list variables_anoms: the anomaly variables in order to output
:param list long_abs: the actual variables long name in order to output
:param list long_anoms: the anomaly variables long name in order to output
:param list standard_abs: the actual variables standard name in order to output
:param list standard_anoms: the anomaly variables standard name in order to output
:param list unitsarr: the variable units in order to output
:param float TheMDI: the missing data indicator
'''
# remove file
if os.path.exists(filename):
os.remove(filename)
outfile = ncdf.Dataset(filename,'w', format='NETCDF4')
# Set up dimensions - with time only for SAMP, FULL and OBS
if (uSource != 'rbar') & (uSource != 'sbarSQ'):
time_dim = outfile.createDimension('time',len(time))
lat_dim = outfile.createDimension('latitude',len(lats)) # as TRC of box edges given, size = # box centres to be written
lon_dim = outfile.createDimension('longitude',len(lons))
#***********
# set up basic variables linked to dimensions
# make time variable
if (uSource != 'rbar') & (uSource != 'sbarSQ'):
nc_var = outfile.createVariable('time', np.dtype('int'), ('time'), zlib = True) # with compression!!!
nc_var.long_name = "time since 1/1/1973 in months"
nc_var.units = "months"
nc_var.standard_name = "time"
nc_var[:] = time
# make latitude variable
nc_var = outfile.createVariable('latitude', np.dtype('float32'), ('latitude'), zlib = True) # with compression!!!
nc_var.long_name = "latitude"
nc_var.units = "degrees north"
nc_var.standard_name = "latitude"
nc_var[:] = lats
# make longitude variable
nc_var = outfile.createVariable('longitude', np.dtype('float32'), ('longitude'), zlib = True) # with compression!!!
nc_var.long_name = "longitude"
nc_var.units = "degrees east"
nc_var.standard_name = "longitude"
nc_var[:] = lons
#***********
# create variables actuals - makes 1 sigma:
# print("Test data again for MDI")
for v in range(len(variables_abs)):
var = variables_abs[v]
print(v, var)
Write_Netcdf_Variable_Unc(uSource, outfile, var, long_abs[v], standard_abs[v], unitsarr[v], data_abs[v], TheMDI)
# create variables anomalies - makes 1 sigma:
for v in range(len(variables_anoms)):
var = variables_anoms[v]
print(v, var)
Write_Netcdf_Variable_Unc(uSource, outfile, var, long_anoms[v], standard_anoms[v], unitsarr[v], data_anoms[v], TheMDI)
# Global Attributes
# Read these from file
attr_file = os.path.join(os.getcwd(), "attributes.dat")
try:
with open(attr_file,'r') as infile:
lines = infile.readlines()
except IOError:
print("Attributes file not found at " + attr_file)
raise IOError
attributes = {}
for line in lines:
split_line = line.split()
attributes[split_line[0]] = " ".join(split_line[1:])
# Set the attributes
for attr in attributes:
outfile.__setattr__(attr, attributes[attr])
outfile.date_created = dt.datetime.strftime(dt.datetime.now(), "%Y-%m-%d, %H:%M")
outfile.Conventions = 'CF-1.5'
outfile.Metadata_Conventions = 'Unidata Dataset Discovery v1.0,CF Discrete Sampling Geometries Conventions'
outfile.featureType = 'gridded'
outfile.close()
return # Write_NetCDF_Unc
################################################################################################################
# MAIN #
################################################################################################################
def main(argv):
# INPUT PARAMETERS AS STRINGS??? DOESN@T SEEM TO MATTER
year1 = '1973'
year2 = '2017'
month1 = '01' # months must be 01, 02 etc
month2 = '12'
timings = 'both' # 'day','night'
platform = 'ship' # 'all'
try:
opts, args = getopt.getopt(argv, "hi:",
["year1=","year2=","month1=","month2=","timings=","platform="])
except getopt.GetoptError:
print('Usage (as strings) Combined_Uncertainty_Grids.py --year1 <1973> --year2 <2017> '+\
'--month1 <01> --month2 <12> --timings <both> --platform <ship>')
sys.exit(2)
for opt, arg in opts:
if opt == "--year1":
try:
year1 = arg
except:
sys.exit("Failed: year1 not an integer")
elif opt == "--year2":
try:
year2 = arg
except:
sys.exit("Failed: year2 not an integer")
elif opt == "--month1":
try:
month1 = arg
except:
sys.exit("Failed: month1 not an integer")
elif opt == "--month2":
try:
month2 = arg
except:
sys.exit("Failed: month2 not an integer")
elif opt == "--timings":
try:
timings = arg
except:
sys.exit("Failed: timings not a string")
elif opt == "--platform":
try:
platform = arg
except:
sys.exit("Failed: platform not a string")
assert year1 != -999 and year2 != -999, "Year not specified."
print(year1, year2, month1, month2, timings, platform)
# Set up this run files, directories and dates/clims: years, months, ship or all
VarList = ['marine_air_temperature','dew_point_temperature','specific_humidity','vapor_pressure','relative_humidity','wet_bulb_temperature','dew_point_depression','n_grids'] # This is the ReadInfo
VarLong = ['Marine Air Temperature','Dew Point Temperature','Specific Humidity','Vapor Pressure','Relative Humidity','Wet Bulb Temperature','Dew Point Depression','n_grids'] # This is the ReadInfo
VarStandard = ['marine air temperature','dew point temperature','specific humidity','vapor pressure','relative humidity','wet bulb temperature','dew point depression','n_grids'] # This is the ReadInfo
AnomsVarList = [i+'_anomalies' for i in VarList[0:7]]
AnomsVarList.append(VarList[7])
AnomsVarLong = [i+' Anomalies' for i in VarLong[0:7]]
AnomsVarStandard = [i+' anomalies' for i in VarStandard[0:7]]
var_loop = ['T','Td','q','e','RH','Tw','DPD']
units_loop = ['degrees C','degrees C','g/kg','hPa','%rh','degrees C','degrees C','standard']
# Climatology
ClimStart = 1981
ClimEnd = 2010
# var_loop = ['T']
# gridbox_sampling_uncertainty.calc_sampling_unc IsMarine switch
IsMarine = True # In this code this should always be True!!!
# Missing Data Indicator
MDI = -1e30
# Input and Output directory:
if platform == 'ship':
WorkingDir = '/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocalship/'
else:
WorkingDir = '/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BClocal/'
# Input Files
FilAnoms = 'OBSclim2BClocal_5x5_monthly_renorm19812010_anomalies_from_daily_'+timings+'_relax.nc'
FilAbs = 'OBSclim2BClocal_5x5_monthly_renorm19812010_anomalies_from_daily_'+timings+'_relax.nc'
# It was this file but now (Feb 2021) I;ve changed the actuals in renorm19812010 files to be renormed anoms+clims
# FilAbs = 'OBSclim2BClocal_5x5_monthly_from_daily_'+timings+'_relax.nc'
FilUR = 'OBSclim2BClocal_uR_5x5_monthly_from_daily_'+timings+'_relax.nc'
FilUC = 'OBSclim2BClocal_uC_5x5_monthly_from_daily_'+timings+'_relax.nc'
FilUM = 'OBSclim2BClocal_uM_5x5_monthly_from_daily_'+timings+'_relax.nc'
FilUSCN = 'OBSclim2BClocal_uSCN_5x5_monthly_from_daily_'+timings+'_relax.nc'
FilUHGT = 'OBSclim2BClocal_uHGT_5x5_monthly_from_daily_'+timings+'_relax.nc'
# Output Files
# # If running as hadobs
OutFilUTotObs = WorkingDir+'OBSclim2BClocal_uOBS_5x5_monthly_from_daily_'+timings+'_relax.nc'
OutFilUSamp = WorkingDir+'OBSclim2BClocal_uSAMP_5x5_monthly_from_daily_'+timings+'_relax.nc'
OutFilUsbarSQ = WorkingDir+'OBSclim2BClocal_usbarSQ_5x5_monthly_from_daily_'+timings+'_relax.nc'
OutFilUrbar = WorkingDir+'OBSclim2BClocal_urbar_5x5_monthly_from_daily_'+timings+'_relax.nc'
OutFilUFull = WorkingDir+'OBSclim2BClocal_uFULL_5x5_monthly_from_daily_'+timings+'_relax.nc'
# # If running as hadkw
# OutFilUTotObs = 'TMPDIR/OBSclim2BClocal_uOBS_5x5_monthly_from_daily_'+timings+'_relax.nc'
# OutFilUSamp = 'TMPDIR/OBSclim2BClocal_uSAMP_5x5_monthly_from_daily_'+timings+'_relax.nc'
# OutFilUsbarSQ = 'TMPDIR/OBSclim2BClocal_usbarSQ_5x5_monthly_from_daily_'+timings+'_relax.nc'
# OutFilUrbar = 'TMPDIR/OBSclim2BClocal_urbar_5x5_monthly_from_daily_'+timings+'_relax.nc'
# OutFilUFull = 'TMPDIR/OBSclim2BClocal_uFULL_5x5_monthly_from_daily_'+timings+'_relax.nc'
    # Set up necessary dates - dates for output are just counts of months from 0 to Ntims-1
StYr = int(year1)
EdYr = int(year2)
Ntims = ((EdYr + 1) - StYr) * 12
TimesArr = np.arange(Ntims) # months since January 1973
# Set up empty lists to store numpy arrays of the uncertainties
SampUncAnomsList = []
SampUncAbsList = []
sbarSQAnomsList = []
sbarSQAbsList = []
rbarAnomsList = []
rbarAbsList = []
TotObsUncAnomsList = []
TotObsUncAbsList = []
FullUncAnomsList = []
FullUncAbsList = []
#########
# Work on Sampling Uncertainty
# If we're struggling for memory then loop through each variable - this will change the format to be one file per variable now
# Otherwise I'll write a seperate program to reformat the files to match HadISDH-land. They will have to be reformatted again for CEDA
# ANOMALIES
print("Working on sampling uncertainty anomalies...")
# Open necessary files to get all variables, n_obs, anomaly values, lats, lons - hopefully this doesn't use too much memory
Filee = WorkingDir+FilAnoms
LatInfo = ['latitude']
LonInfo = ['longitude']
TmpDataList, LatList, LonList = GetGrid4(Filee,AnomsVarList,LatInfo,LonInfo)
# This comes out as:
    # TmpDataList: a list of np arrays (times, lats [87.5N to 87.5S], lons [-177.5W to 177.5E])
# LatList: an NLats np array of lats centres (87.5N to 87.5S)
    # LonList: an NLons np array of lons centres (-177.5W to 177.5E)
# Get lat and lon counts
NLats = len(LatList)
NLons = len(LonList)
    # Work out mean n_stations (pseudo) per gridbox (month and over whole period) to pass to sampling uncertainty
MeanNPointsArr, NPointsArr = get_pseudo_stations(TmpDataList[7],NLats,NLons, MDI)
#pdb.set_trace()
# Loop through each variable
for v,var in enumerate(var_loop):
print("Working on ... ",var)
# Calculate the sampling uncertainty - make this stand alone so that it can be called by HadISDH-land!!!
SESQArr, rbarArr, sbarSQArr = gsu.calc_sampling_unc(TmpDataList[v],LatList,LonList,MeanNPointsArr,NPointsArr,MDI,IsMarine,StYr,EdYr,ClimStart,ClimEnd)
SampUncAnomsList.append(SESQArr)
sbarSQAnomsList.append(sbarSQArr)
rbarAnomsList.append(rbarArr)
# # Append MeanNPointsArr to sbarSQAnomsList and NPointsArr to SampUncAnomsList
# SampUncAnomsList.append(NPointsArr)
# sbarSQAnomsList.append(MeanNPointsArr)
# Clean up
del TmpDataList
# print('Test Sampling Uncertainty Anoms:')
# pdb.set_trace()
# ABSOLUTES - THIS MAY NOT PROVIDE ANYTHING SENSIBLE
print("Working on sampling uncertainty anomalies...")
# Open necessary files to get all variables, n_obs, anomaly values, lats, lons - hopefully this doesn't use too much memory
Filee = WorkingDir+FilAbs
LatInfo = ['latitude']
LonInfo = ['longitude']
TmpDataList, LatList, LonList = GetGrid4(Filee,VarList,LatInfo,LonInfo)
# This comes out as:
    # TmpDataList: a list of np arrays (times, lats [87.5N to 87.5S], lons [-177.5W to 177.5E])
# LatList: an NLats np array of lats centres (87.5N to 87.5S)
    # LonList: an NLons np array of lons centres (-177.5W to 177.5E)
# Pseudo station same for anoms and abs so no need to redo
# Loop through each variable
for v,var in enumerate(var_loop):
print("Working on ... ",var)
# Calculate the sampling uncertainty - make this stand alone so that it can be called by HadISDH-land!!!
SESQArr, rbarArr, sbarSQArr = gsu.calc_sampling_unc(TmpDataList[v],LatList,LonList,MeanNPointsArr,NPointsArr,MDI,IsMarine,StYr,EdYr,ClimStart,ClimEnd)
SampUncAbsList.append(SESQArr)
sbarSQAbsList.append(sbarSQArr)
rbarAbsList.append(rbarArr)
    # Reset zero counts in MeanNPointsArr and NPointsArr to the missing value -1
MeanNPointsArr[np.where(MeanNPointsArr == 0)] = -1
NPointsArr[np.where(NPointsArr == 0)] = -1
# Append int arrays of MeanNPointsArr to sbarSQAbsList and NPointsArr to SampUncAbsList
SampUncAbsList.append(NPointsArr.astype(int))
sbarSQAbsList.append(MeanNPointsArr.astype(int))
# Clean up
del TmpDataList
# print('Test Sampling Uncertainty Abs:')
# pdb.set_trace()
############
# Work on Total Obs uncertainty
# ANOMALIES
print("Working on total obs uncertainty anoms...")
# Open necessary files to get all variables uncertainties, lats, lons - hopefully this doesn't use too much memory
Filee = WorkingDir+FilUR
LatInfo = ['latitude']
LonInfo = ['longitude']
URAnomsVarList = [i+'_uR' for i in AnomsVarList[0:7]]
URAnomsVarList.append(AnomsVarList[7])
URDataList, LatList, LonList = GetGrid4(Filee,URAnomsVarList[0:7],LatInfo,LonInfo)
# This comes out as:
    # TmpDataList: a list of np arrays (times, lats [87.5N to 87.5S], lons [-177.5W to 177.5E])
# LatList: an NLats np array of lats centres (87.5N to 87.5S)
# LonList: an NLons np array of lons centres (87.5N to 87.5S)
Filee = WorkingDir+FilUC
UCAnomsVarList = [i+'_uC' for i in AnomsVarList[0:7]]
UCAnomsVarList.append(AnomsVarList[7])
UCDataList, LatList, LonList = GetGrid4(Filee,UCAnomsVarList[0:7],LatInfo,LonInfo)
Filee = WorkingDir+FilUM
UMAnomsVarList = [i+'_uM' for i in AnomsVarList[0:7]]
UMAnomsVarList.append(AnomsVarList[7])
UMDataList, LatList, LonList = GetGrid4(Filee,UMAnomsVarList[0:7],LatInfo,LonInfo)
Filee = WorkingDir+FilUSCN
USCNAnomsVarList = [i+'_uSCN' for i in AnomsVarList[0:7]]
USCNAnomsVarList.append(AnomsVarList[7])
USCNDataList, LatList, LonList = GetGrid4(Filee,USCNAnomsVarList[0:7],LatInfo,LonInfo)
Filee = WorkingDir+FilUHGT
UHGTAnomsVarList = [i+'_uHGT' for i in AnomsVarList[0:7]]
UHGTAnomsVarList.append(AnomsVarList[7])
UHGTDataList, LatList, LonList = GetGrid4(Filee,UHGTAnomsVarList[0:7],LatInfo,LonInfo)
# Loop through each variable
for v,var in enumerate(var_loop):
print("Working on ... ",var)
# Get total obs uncertainty - Combine the obs uncertainty sources across the gridbox
TotObsUnc = calc_total_obs_unc(URDataList[v],UCDataList[v],UMDataList[v],USCNDataList[v],UHGTDataList[v],NLats,NLons,MDI)
TotObsUncAnomsList.append(TotObsUnc)
# Clean Up
del URDataList
del UCDataList
del UMDataList
del USCNDataList
del UHGTDataList
# print('Test Total Obs Uncertainty Anoms:')
# pdb.set_trace()
# ABSOLUTES
print("Working on total obs uncertainty anoms...")
# Open necessary files to get all variables uncertainties, lats, lons - hopefully this doesn't use too much memory
Filee = WorkingDir+FilUR
LatInfo = ['latitude']
LonInfo = ['longitude']
URVarList = [i+'_uR' for i in VarList[0:7]]
URVarList.append(VarList[7])
URDataList, LatList, LonList = GetGrid4(Filee,URVarList[0:7],LatInfo,LonInfo)
# This comes out as:
    # TmpDataList: a list of np arrays (times, lats [87.5N to 87.5S], lons [-177.5W to 177.5E])
# LatList: an NLats np array of lats centres (87.5N to 87.5S)
    # LonList: an NLons np array of lons centres (-177.5W to 177.5E)
Filee = WorkingDir+FilUC
UCVarList = [i+'_uC' for i in VarList[0:7]]
UCVarList.append(VarList[7])
UCDataList, LatList, LonList = GetGrid4(Filee,UCVarList[0:7],LatInfo,LonInfo)
Filee = WorkingDir+FilUM
UMVarList = [i+'_uM' for i in VarList[0:7]]
UMVarList.append(VarList[7])
UMDataList, LatList, LonList = GetGrid4(Filee,UMVarList[0:7],LatInfo,LonInfo)
Filee = WorkingDir+FilUSCN
USCNVarList = [i+'_uSCN' for i in VarList[0:7]]
USCNVarList.append(VarList[7])
USCNDataList, LatList, LonList = GetGrid4(Filee,USCNVarList[0:7],LatInfo,LonInfo)
Filee = WorkingDir+FilUHGT
UHGTVarList = [i+'_uHGT' for i in VarList[0:7]]
UHGTVarList.append(VarList[7])
UHGTDataList, LatList, LonList = GetGrid4(Filee,UHGTVarList[0:7],LatInfo,LonInfo)
# Loop through each variable
for v,var in enumerate(var_loop):
print("Working on ... ",var)
# Get total obs uncertainty - Combine the obs uncertainty sources across the gridbox
TotObsUnc = calc_total_obs_unc(URDataList[v],UCDataList[v],UMDataList[v],USCNDataList[v],UHGTDataList[v],NLats,NLons,MDI)
TotObsUncAbsList.append(TotObsUnc)
# Clean Up
del URDataList
del UCDataList
del UMDataList
del USCNDataList
del UHGTDataList
# print('Test Total Obs Uncertainty Anoms:')
# pdb.set_trace()
##############
# Work on Full Uncertainty
# ANOMALIES
print("Working on full uncertainty anoms...")
# Loop through each variable
for v,var in enumerate(var_loop):
print("Working on ... ",var)
# Get full uncertainty - Combine the obs and sampling uncertainties across the gridbox
FullUnc = calc_full_unc(TotObsUncAnomsList[v],SampUncAnomsList[v],NLats,NLons,MDI)
FullUncAnomsList.append(FullUnc)
# print('Test Full Uncertainty Anoms:')
# pdb.set_trace()
# ABSOLUTES
print("Working on full uncertainty anoms...")
# Loop through each variable
for v,var in enumerate(var_loop):
print("Working on ... ",var)
# Get full uncertainty - Combine the obs and sampling uncertainties across the gridbox
FullUnc = calc_full_unc(TotObsUncAbsList[v],SampUncAbsList[v],NLats,NLons,MDI)
FullUncAbsList.append(FullUnc)
# print('Test Full Uncertainty Abs:')
# pdb.set_trace()
##############
# Write out as 1 sigma!!!!!
# print("Test for whether missing values are still MDI")
# pdb.set_trace()
# Write out sampling uncertainty - this has three components which are written seperately.
# For testing I'm just running temperature VarList[0], AnomsVarList[0] but this should be [0:7] for all but SAMP and sbarSQ (all of VarList to include n_grids)
Write_NetCDF_Unc('SAMP',OutFilUSamp,SampUncAbsList,SampUncAnomsList,LatList,LonList,TimesArr,
VarList,AnomsVarList[0:7],VarLong,AnomsVarLong,VarStandard,AnomsVarStandard,units_loop,MDI)
Write_NetCDF_Unc('sbarSQ',OutFilUsbarSQ,sbarSQAbsList,sbarSQAnomsList,LatList,LonList,TimesArr,
VarList,AnomsVarList[0:7],VarLong,AnomsVarLong,VarStandard,AnomsVarStandard,units_loop,MDI)
Write_NetCDF_Unc('rbar',OutFilUrbar,rbarAbsList,rbarAnomsList,LatList,LonList,TimesArr,
VarList[0:7],AnomsVarList[0:7],VarLong[0:7],AnomsVarLong,VarStandard[0:7],AnomsVarStandard,units_loop[0:7],MDI)
# Write out total obs uncertainty
Write_NetCDF_Unc('OBS',OutFilUTotObs,TotObsUncAbsList,TotObsUncAnomsList,LatList,LonList,TimesArr,
VarList[0:7],AnomsVarList[0:7],VarLong[0:7],AnomsVarLong,VarStandard[0:7],AnomsVarStandard,units_loop[0:7],MDI)
# Write out full uncertainty
Write_NetCDF_Unc('FULL',OutFilUFull,FullUncAbsList,FullUncAnomsList,LatList,LonList,TimesArr,VarList[0:7],AnomsVarList[0:7],
VarLong[0:7],AnomsVarLong,VarStandard[0:7],AnomsVarStandard,units_loop[0:7],MDI)
#########
print('And we are done!')
if __name__ == '__main__':
main(sys.argv[1:])
| cc0-1.0 |
librosa/librosa | librosa/core/harmonic.py | 2 | 13268 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Harmonic calculations for frequency representations"""
import numpy as np
import scipy.interpolate
import scipy.signal
from ..util.exceptions import ParameterError
__all__ = ["salience", "interp_harmonics"]
def salience(
S,
freqs,
h_range,
weights=None,
aggregate=None,
filter_peaks=True,
fill_value=np.nan,
kind="linear",
axis=0,
):
"""Harmonic salience function.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input time frequency magnitude representation (e.g. STFT or CQT magnitudes).
Must be real-valued and non-negative.
freqs : np.ndarray, shape=(S.shape[axis])
The frequency values corresponding to S's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to include in salience computation. The first harmonic (1)
corresponds to ``S`` itself. Values less than one (e.g., 1/2) correspond
to sub-harmonics.
weights : list-like
The weight to apply to each harmonic in the summation. (default:
uniform weights). Must be the same length as ``harmonics``.
aggregate : function
aggregation function (default: `np.average`)
If ``aggregate=np.average``, then a weighted average is
computed per-harmonic according to the specified weights.
For all other aggregation functions, all harmonics
are treated equally.
filter_peaks : bool
If true, returns harmonic summation only on frequencies of peak
magnitude. Otherwise returns harmonic summation over the full spectrum.
Defaults to True.
fill_value : float
The value to fill non-peaks in the output representation. (default:
`np.nan`) Only used if ``filter_peaks == True``.
kind : str
Interpolation type for harmonic estimation.
See `scipy.interpolate.interp1d`.
axis : int
The axis along which to compute harmonics
Returns
-------
    S_sal : np.ndarray, shape=S.shape
        ``S_sal`` will have the same shape as ``S``, and measure
        the overall harmonic energy at each frequency.
See Also
--------
interp_harmonics
Examples
--------
>>> y, sr = librosa.load(librosa.ex('trumpet'), duration=3)
>>> S = np.abs(librosa.stft(y))
>>> freqs = librosa.fft_frequencies(sr)
>>> harms = [1, 2, 3, 4]
>>> weights = [1.0, 0.5, 0.33, 0.25]
>>> S_sal = librosa.salience(S, freqs, harms, weights, fill_value=0)
>>> print(S_sal.shape)
(1025, 115)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... sr=sr, y_axis='log', x_axis='time', ax=ax[0])
>>> ax[0].set(title='Magnitude spectrogram')
>>> ax[0].label_outer()
>>> img = librosa.display.specshow(librosa.amplitude_to_db(S_sal,
... ref=np.max),
... sr=sr, y_axis='log', x_axis='time', ax=ax[1])
>>> ax[1].set(title='Salience spectrogram')
>>> fig.colorbar(img, ax=ax, format="%+2.0f dB")
"""
if aggregate is None:
aggregate = np.average
if weights is None:
weights = np.ones((len(h_range),))
else:
weights = np.array(weights, dtype=float)
S_harm = interp_harmonics(S, freqs, h_range, kind=kind, axis=axis)
if aggregate is np.average:
S_sal = aggregate(S_harm, axis=0, weights=weights)
else:
S_sal = aggregate(S_harm, axis=0)
if filter_peaks:
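        # argrelmax returns the indices of local maxima along the frequency axis;
        # every non-peak bin is filled with fill_value below.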
S_peaks = scipy.signal.argrelmax(S, axis=0)
S_out = np.empty(S.shape)
S_out.fill(fill_value)
S_out[S_peaks[0], S_peaks[1]] = S_sal[S_peaks[0], S_peaks[1]]
S_sal = S_out
return S_sal
def interp_harmonics(x, freqs, h_range, kind="linear", fill_value=0, axis=0):
"""Compute the energy at harmonics of time-frequency representation.
Given a frequency-based energy representation such as a spectrogram
or tempogram, this function computes the energy at the chosen harmonics
of the frequency axis. (See examples below.)
The resulting harmonic array can then be used as input to a salience
computation.
Parameters
----------
x : np.ndarray
The input energy
freqs : np.ndarray, shape=(X.shape[axis])
The frequency values corresponding to X's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to ``x``
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
Returns
-------
x_harm : np.ndarray, shape=(len(h_range), [x.shape])
``x_harm[i]`` will have the same shape as ``x``, and measure
the energy at the ``h_range[i]`` harmonic of each frequency.
See Also
--------
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.ex('choice'))
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr, ax=ax)
>>> ax.set(yticks=0.5 + np.arange(len(h_range)),
... yticklabels=['{:.3g}'.format(_) for _ in h_range],
... ylabel='Harmonic', xlabel='Tempo (BPM)')
We can also compute frequency harmonics for spectrograms.
To calculate sub-harmonic energy, use values < 1.
>>> y, sr = librosa.load(librosa.ex('trumpet'), duration=3)
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> fig, ax = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True)
>>> for i, _sh in enumerate(S_harm):
... img = librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log', x_axis='time',
... ax=ax.flat[i])
... ax.flat[i].set(title='h={:.3g}'.format(h_range[i]))
... ax.flat[i].label_outer()
>>> fig.colorbar(img, ax=ax, format="%+2.f dB")
"""
# X_out will be the same shape as X, plus a leading
# axis that has length = len(h_range)
out_shape = [len(h_range)]
out_shape.extend(x.shape)
x_out = np.zeros(out_shape, dtype=x.dtype)
if freqs.ndim == 1 and len(freqs) == x.shape[axis]:
harmonics_1d(
x_out, x, freqs, h_range, kind=kind, fill_value=fill_value, axis=axis
)
elif freqs.ndim == 2 and freqs.shape == x.shape:
harmonics_2d(
x_out, x, freqs, h_range, kind=kind, fill_value=fill_value, axis=axis
)
else:
raise ParameterError(
"freqs.shape={} does not match "
"input shape={}".format(freqs.shape, x.shape)
)
return x_out
def harmonics_1d(harmonic_out, x, freqs, h_range, kind="linear", fill_value=0, axis=0):
"""Populate a harmonic tensor from a time-frequency representation.
Parameters
----------
harmonic_out : np.ndarray, shape=(len(h_range), X.shape)
The output array to store harmonics
X : np.ndarray
The input energy
freqs : np.ndarray, shape=(x.shape[axis])
The frequency values corresponding to x's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to ``x``
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
harmonics
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.ex('choice'))
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr, ax=ax)
>>> ax.set(yticks=0.5 + np.arange(len(h_range)),
... yticklabels=['{:.3g}'.format(_) for _ in h_range],
    ...        ylabel='Harmonic', xlabel='Tempo (BPM)')
We can also compute frequency harmonics for spectrograms.
To calculate subharmonic energy, use values < 1.
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> fig, ax = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True)
>>> for i, _sh in enumerate(S_harm):
... librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log', x_axis='time', ax=ax.flat[i])
... ax.flat[i].set(title='h={:.3g}'.format(h_range[i]))
... ax.flat[i].label_outer()
"""
# Note: this only works for fixed-grid, 1d interpolation
f_interp = scipy.interpolate.interp1d(
freqs,
x,
kind=kind,
axis=axis,
copy=False,
bounds_error=False,
fill_value=fill_value,
)
idx_out = [slice(None)] * harmonic_out.ndim
# Compute the output index of the interpolated values
interp_axis = 1 + (axis % x.ndim)
# Iterate over the harmonics range
for h_index, harmonic in enumerate(h_range):
idx_out[0] = h_index
# Iterate over frequencies
for f_index, frequency in enumerate(freqs):
# Offset the output axis by 1 to account for the harmonic index
idx_out[interp_axis] = f_index
# Estimate the harmonic energy at this frequency across time
harmonic_out[tuple(idx_out)] = f_interp(harmonic * frequency)
def harmonics_2d(harmonic_out, x, freqs, h_range, kind="linear", fill_value=0, axis=0):
"""Populate a harmonic tensor from a time-frequency representation with
time-varying frequencies.
Parameters
----------
harmonic_out : np.ndarray
The output array to store harmonics
x : np.ndarray
The input energy
freqs : np.ndarray, shape=x.shape
The frequency values corresponding to each element of ``x``
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to ``x``
itself. Values less than one (e.g., 1/2) correspond to
sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
harmonics
harmonics_1d
"""
idx_in = [slice(None)] * x.ndim
idx_freq = [slice(None)] * x.ndim
idx_out = [slice(None)] * harmonic_out.ndim
# This is the non-interpolation axis
ni_axis = (1 + axis) % x.ndim
# For each value in the non-interpolated axis, compute its harmonics
for i in range(x.shape[ni_axis]):
idx_in[ni_axis] = slice(i, i + 1)
idx_freq[ni_axis] = i
idx_out[1 + ni_axis] = idx_in[ni_axis]
harmonics_1d(
harmonic_out[tuple(idx_out)],
x[tuple(idx_in)],
freqs[tuple(idx_freq)],
h_range,
kind=kind,
fill_value=fill_value,
axis=axis,
)
| isc |
shirtsgroup/pygo | analysis/figure_generation/Fig7_plot_PMFQ.py | 1 | 1913 | #!/usr/bin/python2.4
import numpy
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import cPickle
import optparse
import plot_dG
import plot_dG_solution
def main():
lam = [.1, .15, .2, .25, .3, .35, .45, .5, .55, .6]#, .65, .7]
PMF_files = ['/home/edz3fz/proteinmontecarlo/results/1PGB/surface/umbrella_lambda%s/pmfQ_umbrella_325.pkl' % str(x)[1::] for x in lam]
PMF_files[-2] = '/home/edz3fz/proteinmontecarlo/results/1PGB/surface/lambda.55/pmf_325.pkl'
#PMF_files[-1] = '/home/edz3fz/proteinmontecarlo/results/1PGB/surface/lambda.6/pmf_325.pkl'
colors = cm.cool(numpy.linspace(0,1,len(lam)))
fig = plt.figure(1,(8.5,6.5))
plt.rc('text',usetex=True)
matplotlib.rc('font', family = 'serif', size=20)
for i in range(len(lam)):
f = open(PMF_files[i],'rb')
temp = cPickle.load(f)
bin_centers = cPickle.load(f)
f_i = cPickle.load(f)
df_i = cPickle.load(f)
f.close()
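        # Shift each PMF so that its minimum over the last ~30% of the Q bins (the high-Q,
        # near-native basin) sits at zero, making the curves directly comparable.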
idx = numpy.argmin(f_i[int(.7*len(f_i))::]) + int(.7*len(f_i))
f_i -= f_i[idx]
plt.errorbar(bin_centers[1::],f_i[1::],numpy.array(df_i[0,:])[0,1::],label=r'$\lambda$ = %s' % lam[i], color=colors[i])
soln = '/home/edz3fz/proteinmontecarlo/results/1PGB/solution/pmf_325.pkl'
f = open(soln,'rb')
temp = cPickle.load(f)
bin_centers = cPickle.load(f)
f_i = cPickle.load(f)
df_i = cPickle.load(f)
f.close()
idx = numpy.argmin(f_i[int(.7*len(f_i))::]) + int(.7*len(f_i))
f_i -= f_i[idx]
plt.errorbar(bin_centers[1::],f_i[1::],numpy.array(df_i[0,:])[0,1::],label='solution', color='k')
fig.subplots_adjust(right=.8)
plt.ylabel('PMF (kcal/mol)')
plt.xlabel('Q')
plt.legend(bbox_to_anchor=(1.29,.79),prop={'size':14})
plt.savefig('/home/edz3fz/proteinmontecarlo/manuscripts/figures/Fig7_pmfQ.pdf')
plt.show()
if __name__ == '__main__':
main()
| gpl-2.0 |
hennersz/pySpace | basemap/examples/nytolondon.py | 2 | 2269 | # example demonstrating how to draw a great circle on a map.
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import sys
# setup mercator map projection.
# create new figure
fig=plt.figure()
m = Basemap(llcrnrlon=-100.,llcrnrlat=20.,urcrnrlon=20.,urcrnrlat=60.,\
rsphere=(6378137.00,6356752.3142),\
resolution='c',area_thresh=10000.,projection='merc',\
lat_0=40.,lon_0=-20.,lat_ts=20.)
# nylat, nylon are lat/lon of New York
nylat = 40.78
nylon = -73.98
# lonlat, lonlon are lat/lon of London.
lonlat = 51.53
lonlon = 0.08
# find 1000 points along the great circle.
#x,y = m.gcpoints(nylon,nylat,lonlon,lonlat,1000)
# draw the great circle.
#m.plot(x,y,linewidth=2)
# drawgreatcircle performs the previous 2 steps in one call.
m.drawgreatcircle(nylon,nylat,lonlon,lonlat,linewidth=2,color='b')
m.drawcoastlines()
m.fillcontinents()
# draw parallels
circles = np.arange(10,90,20)
m.drawparallels(circles,labels=[1,1,0,1])
# draw meridians
meridians = np.arange(-180,180,30)
m.drawmeridians(meridians,labels=[1,1,0,1])
plt.title('Great Circle from New York to London (Mercator)')
sys.stdout.write('plotting Great Circle from New York to London (Mercator)\n')
# create new figure
fig=plt.figure()
# setup a gnomonic projection.
m = Basemap(llcrnrlon=-100.,llcrnrlat=20.,urcrnrlon=20.,urcrnrlat=60.,\
resolution='c',area_thresh=10000.,projection='gnom',\
lat_0=40.,lon_0=-45.)
# nylat, nylon are lat/lon of New York
nylat = 40.78
nylon = -73.98
# lonlat, lonlon are lat/lon of London.
lonlat = 51.53
lonlon = 0.08
# find 1000 points along the great circle.
#x,y = m.gcpoints(nylon,nylat,lonlon,lonlat,1000)
# draw the great circle.
#m.plot(x,y,linewidth=2)
# drawgreatcircle performs the previous 2 steps in one call.
m.drawgreatcircle(nylon,nylat,lonlon,lonlat,linewidth=2,color='b')
m.drawcoastlines()
m.fillcontinents()
# draw parallels
circles = np.arange(10,90,20)
m.drawparallels(circles,labels=[0,1,0,0])
# draw meridians
meridians = np.arange(-180,180,30)
m.drawmeridians(meridians,labels=[1,1,0,1])
plt.title('Great Circle from New York to London (Gnomonic)')
sys.stdout.write('plotting Great Circle from New York to London (Gnomonic)\n')
plt.show()
| gpl-3.0 |
janmedlock/HIV-95-vaccine | plots/prcc.py | 1 | 4570 | #!/usr/bin/python3
'''
Calculate PRCCs and make tornado plots.
'''
import os.path
import sys
from matplotlib import pyplot
from matplotlib import ticker
import numpy
from scipy import interpolate
import seaborn
sys.path.append(os.path.dirname(__file__)) # cwd for Sphinx.
import common
import sensitivity
import stats
sys.path.append('..')
import model
def tornado(ax, results, targets, outcome, t, parameter_samples,
colors, parameter_names = None, errorbars = False):
outcome_samples = sensitivity.get_outcome_samples(results, targets,
outcome, t)
n = numpy.shape(parameter_samples)[-1]
if parameter_names is None:
parameter_names = ['parameter[{}]'.format(i) for i in range(n)]
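    # Partial rank correlation of each parameter with the outcome (controlling for the
    # other parameters), its confidence interval, and the CI re-expressed as (lower, upper)
    # offsets from rho in the form matplotlib's xerr expects.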
rho = stats.prcc(parameter_samples, outcome_samples)
CI = stats.prcc_CI(rho, len(outcome_samples))
xerr = numpy.row_stack((rho - CI[:, 0], CI[:, 1] - rho))
ix = numpy.argsort(numpy.abs(rho))
labels = [parameter_names[i] for i in ix]
c = [colors[l] for l in labels]
h = range(n)
if errorbars:
kwds = dict(xerr = xerr[:, ix],
error_kw = dict(ecolor = 'black',
elinewidth = 1.5,
capthick = 1.5,
capsize = 5,
alpha = 0.9))
else:
kwds = dict()
patches = ax.barh(h, rho[ix],
height = 1, left = 0,
align = 'center',
color = c,
edgecolor = c,
**kwds)
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:g}'))
# ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(n = 2))
ax.tick_params(labelsize = pyplot.rcParams['font.size'] + 1)
ax.tick_params(axis = 'y', pad = 35)
ax.set_yticks(h)
ax.set_ylim(- 0.5, n - 0.5)
ax.set_yticklabels(labels, horizontalalignment = 'center')
ax.grid(False, axis = 'y', which = 'both')
return patches
def tornados():
country = 'Global'
outcome = 'new_infections'
targets = [
[model.target.StatusQuo(),
model.target.UNAIDS95()],
[model.target.StatusQuo(),
model.target.Vaccine(treatment_target = model.target.StatusQuo())]]
targets = [[str(x) for x in t] for t in targets]
titles = ['95–95–95', 'Vaccine']
time = 2035
figsize = (8.5 * 0.7, 6.5)
palette = 'Dark2'
parameter_samples = model.parameters._get_samples()
# Get fancy names.
parameter_names = common.parameter_names
targets_flat = set(targets[0])
for t in targets[1 : ]:
targets_flat.update(t)
results = common.get_country_results(country, targets = targets_flat)
# Order colors by order of prccs for 1st time.
outcome_samples = sensitivity.get_outcome_samples(results, targets[0],
outcome, time)
rho = stats.prcc(parameter_samples, outcome_samples)
ix = numpy.argsort(numpy.abs(rho))[ : : -1]
labels = [parameter_names[i] for i in ix]
colors_ = seaborn.color_palette(palette, len(parameter_names))
colors = {l: c for (l, c) in zip(labels, colors_)}
nrows = 1
ncols = len(targets)
with seaborn.axes_style('whitegrid', common.rc_black_text):
fig, axes = pyplot.subplots(nrows, ncols,
figsize = figsize,
sharex = 'all')
if isinstance(axes, pyplot.Axes):
axes = [axes]
for (ax, targets_, title) in zip(axes, targets, titles):
seaborn.despine(ax = ax, top = True, bottom = True)
ax.tick_params(labelsize = pyplot.rcParams['font.size'])
tornado(ax, results, targets_, outcome, time,
parameter_samples, colors,
parameter_names = parameter_names)
ax.set_xlabel('PRCC')
# Make x-axis limits symmetric.
# xmin, xmax = ax.get_xlim()
# xabs = max(abs(xmin), abs(xmax))
xabs = 1
ax.set_xlim(- xabs, xabs)
ax.set_title(title)
fig.tight_layout(h_pad = 0, w_pad = 1)
common.savefig(fig, '{}.pdf'.format(common.get_filebase()), title = 'PRCC')
common.savefig(fig, '{}.pgf'.format(common.get_filebase()), title = 'PRCC')
common.savefig(fig, '{}.png'.format(common.get_filebase()), title = 'PRCC')
if __name__ == '__main__':
tornados()
pyplot.show()
| agpl-3.0 |
JT5D/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 9 | 1629 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
def test_bayesian_on_diabetes():
"""
Test BayesianRidge on diabetes
"""
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
"""
Test BayesianRidge on toy
"""
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
X_test = [[1], [3], [4]]
assert(np.abs(clf.predict(X_test) - [1, 3, 4]).sum() < 1.e-2) # identity
def test_toy_ard_object():
"""
Test BayesianRegression ARD classifier
"""
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
test = [[1], [3], [4]]
assert(np.abs(clf.predict(test) - [1, 3, 4]).sum() < 1.e-3) # identity
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 39224 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(X, y, name, criterion):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = est.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, X, y, name, criterion
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
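    # Per that reference, the MDI importance of X_m in totally randomized trees
    # decomposes (roughly stated) as
    #     Imp(X_m) = sum_{k=0}^{p-1} 1 / (C(p, k) * (p - k))
    #                * sum_{B subset of V^{-m}, |B| = k} I(X_m; Y | B),
    # and mdi_importance() below evaluates this sum exhaustively, using Shannon
    # entropy differences to form the conditional mutual information terms.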
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
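    # (Counting sketch: the four values are equally spaced, so the root's random
    # threshold falls in each of the three gaps with probability 1/3.  Cutting the
    # middle gap determines the rest of the tree, hence the compact tree has
    # probability 1/3; cutting either outer gap leaves a 3-valued side whose two
    # possible cuts are equally likely, giving the 4 remaining trees probability
    # 1/3 * 1/2 = 1/6 each.)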
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
| bsd-3-clause |
dlt-rilmta/hunlp-GATE | Lang_Hungarian/resources/huntag3/tagger.py | 1 | 5018 | #!/usr/bin/python3
# -*- coding: utf-8, vim: expandtab:ts=4 -*-
import sys
import os
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
from tools import sentenceIterator, featurizeSentence, BookKeeper
class Tagger:
def __init__(self, features, transModel, options):
self._features = features
self._dataSizes = options['dataSizes']
self._transProbs = transModel
print('loading observation model...', end='', file=sys.stderr, flush=True)
self._model = joblib.load('{0}'.format(options['modelFileName']))
self._featCounter = BookKeeper(options['featCounterFileName'])
self._labelCounter = BookKeeper(options['labelCounterFileName'])
print('done', file=sys.stderr, flush=True)
def printWeights(self, n=100, outputStream=sys.stdout):
coefs = self._model.coef_
labelNoToName = self._labelCounter.noToName
featNoToName = self._featCounter.noToName
sortedFeats = sorted(featNoToName.items())
for i, label in sorted(labelNoToName.items()):
columns = ['{0}:{1}'.format(w, feat) for w, (no, feat) in sorted(zip(coefs[i, :], sortedFeats),
reverse=True)]
print('{0}\t{1}'.format(label, '\t'.join(columns[:n])), file=outputStream) # Best
# Worst -> Negative correlation
print('{0}\t{1}'.format(label, '\t'.join(sorted(columns[-n:], reverse=True))), file=outputStream)
def tagFeatures(self, data):
senFeats = []
senCount = 0
for line in data:
line = line.strip()
if len(line) == 0:
senCount += 1
tagging = self._tagSenFeats(senFeats)
yield [[tag] for tag in tagging]
senFeats = []
if senCount % 1000 == 0:
print('{0}...'.format(senCount), end='', file=sys.stderr, flush=True)
senFeats.append(line.split())
print('{0}...done'.format(senCount), file=sys.stderr, flush=True)
def tagDir(self, dirName):
for fn in os.listdir(dirName):
print('processing file {0}...'.format(fn), end='', file=sys.stderr, flush=True)
for sen, _ in self.tagCorp(open(os.path.join(dirName, fn), encoding='UTF-8')):
yield sen, fn
def tagCorp(self, inputStream=sys.stdin):
senCount = 0
for sen, comment in sentenceIterator(inputStream):
senCount += 1
senFeats = featurizeSentence(sen, self._features)
bestTagging = self._tagSenFeats(senFeats)
taggedSen = [tok + [bestTagging[c]] for c, tok in enumerate(sen)] # Add tagging to sentence
yield taggedSen, comment
if senCount % 1000 == 0:
print('{0}...'.format(senCount), end='', file=sys.stderr, flush=True)
print('{0}...done'.format(senCount), file=sys.stderr, flush=True)
def _getTagProbsByPos(self, senFeats):
# Get Sentence Features translated to numbers and contexts in two steps
getNoTag = self._featCounter.getNoTag
featNumbers = [{getNoTag(feat) for feat in feats if getNoTag(feat) is not None} for feats in senFeats]
rows = []
cols = []
data = []
for rownum, featNumberSet in enumerate(featNumbers):
for featNum in featNumberSet:
rows.append(rownum)
cols.append(featNum)
data.append(1)
contexts = csr_matrix((data, (rows, cols)), shape=(len(featNumbers), self._featCounter.numOfNames()),
dtype=self._dataSizes['dataNP'])
tagProbsByPos = [{self._labelCounter.noToName[i]: prob for i, prob in enumerate(probDist)}
for probDist in self._model.predict_proba(contexts)]
return tagProbsByPos
def toCRFsuite(self, inputStream, outputStream=sys.stdout):
senCount = 0
getNoTag = self._featCounter.getNoTag
featnoToName = self._featCounter.noToName
for sen, comment in sentenceIterator(inputStream):
senCount += 1
senFeats = featurizeSentence(sen, self._features)
# Get Sentence Features translated to numbers and contexts in two steps
for featNumberSet in ({getNoTag(feat) for feat in feats if getNoTag(feat) is not None}
for feats in senFeats):
print('\t'.join(featnoToName[featNum].replace(':', 'colon') for featNum in featNumberSet),
file=outputStream)
print(file=outputStream) # Sentence separator blank line
if senCount % 1000 == 0:
print('{0}...'.format(str(senCount)), end='', file=sys.stderr, flush=True)
print('{0}...done'.format(str(senCount)), file=sys.stderr, flush=True)
def _tagSenFeats(self, senFeats):
return self._transProbs.tagSent(self._getTagProbsByPos(senFeats))
| gpl-3.0 |
phdowling/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
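# Quick numeric illustration of the effect described above: two independent
# random labelings have an adjusted Rand index close to 0, while the
# unadjusted V-measure is well above 0 purely by chance (the exact numbers
# below depend on this arbitrary choice of seed, sample size and k).
_rng_check = np.random.RandomState(0)
_labels_a = _rng_check.randint(10, size=50)
_labels_b = _rng_check.randint(10, size=50)
print("Random labelings -> ARI: %.3f, V-measure: %.3f"
      % (metrics.adjusted_rand_score(_labels_a, _labels_b),
         metrics.v_measure_score(_labels_a, _labels_b)))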
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/user_interfaces/embedding_in_wx5.py | 12 | 1586 | # Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
import wx
import wx.aui
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as Toolbar
class Plot(wx.Panel):
def __init__(self, parent, id = -1, dpi = None, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
self.canvas = Canvas(self, -1, self.figure)
self.toolbar = Toolbar(self.canvas)
self.toolbar.Realize()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas,1,wx.EXPAND)
sizer.Add(self.toolbar, 0 , wx.LEFT | wx.EXPAND)
self.SetSizer(sizer)
class PlotNotebook(wx.Panel):
def __init__(self, parent, id = -1):
wx.Panel.__init__(self, parent, id=id)
self.nb = wx.aui.AuiNotebook(self)
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.SetSizer(sizer)
def add(self,name="plot"):
page = Plot(self.nb)
self.nb.AddPage(page,name)
return page.figure
def demo():
app = wx.PySimpleApp()
frame = wx.Frame(None,-1,'Plotter')
plotter = PlotNotebook(frame)
axes1 = plotter.add('figure 1').gca()
axes1.plot([1,2,3],[2,1,4])
axes2 = plotter.add('figure 2').gca()
axes2.plot([1,2,3,4,5],[2,1,4,2,3])
#axes1.figure.canvas.draw()
#axes2.figure.canvas.draw()
frame.Show()
app.MainLoop()
if __name__ == "__main__": demo()
| mit |
nss350/magPy | tests/testsRobust.py | 1 | 6127 | import sys
import os
sys.path.append(os.path.join('..','utils'))
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from utilsRobust import *
######################
### VARIOUS TESTS FOR UTILS ROBUST
######################
def test_mestimate():
mean = 0
std = 5
x = np.arange(1000)
y = np.random.normal(mean, std, x.size)
ones = np.ones(shape=(x.size))
# add large outliers
    numOutliers = 450
for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
y[index] = np.random.randint(std*4, std*20)
# compute mean
mean = np.average(y)
standardDev = np.std(y)
# compute mad
med = sampleMedian(y)
mad = sampleMAD(y)
# mestimates
mestLocation, mestScale = mestimate(y)
# plot
plt.figure()
plt.scatter(x, y, color='y')
plt.plot(x, ones*mean, lw = 2, color="b", label="mean")
plt.plot(x, ones*standardDev, lw = 2, color="b", ls="dashed")
plt.plot(x, ones*med, lw = 2, color="g", label="median")
plt.plot(x, ones*mad, lw = 2, color="g", ls="dashed")
plt.plot(x, ones*mestLocation, lw = 2, color="r", label="mest")
plt.plot(x, ones*mestScale, lw = 2, color="r", ls="dashed")
plt.legend()
plt.show()
def test_mestimateModel():
# let's generate some data
x = np.arange(1000)
y = np.arange(-50, 50, 0.1)
# create a linear function of this
z = 2.5*x + y
# let's add some noise
mean = 0
std = 3
noise = np.random.normal(0, 3, x.size)
# print noise.shape
z = z + noise
# now add some outliers
numOutliers = 80
for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
z[index] = np.random.randint(std*4, std*20)
A = np.transpose(np.vstack((x, y)))
# now try and do a robust regression
components = mestimateModel(A, z)
print components
# plt.figure()
# plt.plot()
def testRobustRegression():
# random seed
np.random.seed(0)
# the function
x = np.arange(150)
y = 12 + 0.5*x
# noise
mean = 0
std = 3
noise = np.random.normal(mean, 3*std, x.size)
# add noise
yNoise = y + noise
# now add some outliers
numOutliers = 30
for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
yNoise[index] = yNoise[index] + np.random.randint(-1000, 1000)
# now add some outliers
xNoise = np.array(x)
numOutliers = 30
for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
xNoise[index] = x[index] + np.random.randint(-5000, 5000)
xNoise = xNoise.reshape((x.size,1))
# lets use m estimate
paramsM, residsM, scaleM, weightsM = mestimateModel(xNoise, yNoise, intercept=True)
# lets use mm estimate
paramsMM, residsMM, scaleMM, weightsMM = mmestimateModel(xNoise, yNoise, intercept=True)
# lets test chatterjee machler
paramsCM, residsCM, weightsCM = chatterjeeMachler(xNoise, yNoise, intercept=True)
# lets test chatterjee machler mod
paramsModCM, residsModCM, weightsModCM = chatterjeeMachlerMod(xNoise, yNoise, intercept=True)
    # let's plot Pdiag -- presumably the hat-matrix diagonal used by the
    # Chatterjee-Machler weighting; it is not returned by the calls above,
    # so the histogram is left disabled to keep this test runnable
    # plt.figure()
    # n, bins, patches = plt.hist(Pdiag, 50, normed=0, facecolor='green', alpha=0.75)
# try and predict
yM = paramsM[0] + paramsM[1]*x
yMM = paramsMM[0] + paramsMM[1]*x
yCM = paramsCM[0] + paramsCM[1]*x
yCM_mod = paramsModCM[0] + paramsModCM[1]*x
plt.figure()
plt.scatter(x, y, marker="s", color="black")
plt.scatter(xNoise, yNoise)
plt.plot(x, yM)
plt.plot(x, yMM)
plt.plot(x, yCM)
plt.plot(x, yCM_mod)
plt.legend(["M estimate", "MM estimate", "chatterjeeMachler", "chatterjeeMachlerMod"], loc="lower left")
plt.show()
def testRobustRegression2D():
# random seed
np.random.seed(0)
numPts = 300
# the function
x1 = np.arange(numPts, dtype="float")
x2 = 10*np.cos(2.0*np.pi*10*x1/np.max(x1))
y = 12 + 0.5*x1 + 3*x2
# noise
mean = 0
std = 3
noise = np.random.normal(mean, 3*std, numPts)
# add noise
yNoise = y + noise
# now add some outliers
numOutliers = 140
for i in xrange(0, numOutliers):
index = np.random.randint(0, numPts)
yNoise[index] = yNoise[index] + np.random.randint(-100, 100)
# now add some outliers
x1Noise = np.array(x1)
x2Noise = np.array(x2)
numOutliers = 5
for i in xrange(0, numOutliers):
index = np.random.randint(0, numPts)
x1Noise[index] = x1[index] + np.random.randint(-500, 500)
index = np.random.randint(0, numPts)
x2Noise[index] = x2[index] + np.random.randint(-500, 500)
x1Noise = x1Noise.reshape((x1.size,1))
x2Noise = x2Noise.reshape((x2.size,1))
X = np.hstack((x1Noise, x2Noise))
# lets use m estimate
paramsM, residsM, scaleM, weightsM = mestimateModel(X, yNoise, intercept=True)
# lets use mm estimate
paramsMM, residsMM, scaleMM, weightsMM = mmestimateModel(X, yNoise, intercept=True)
# lets test chatterjee machler
paramsCM, residsCM, weightsCM = chatterjeeMachler(X, yNoise, intercept=True)
# lets test chatterjee machler mod
paramsModCM, residsModCM, weightsModCM = chatterjeeMachlerMod(X, yNoise, intercept=True)
# lets test chatterjee machler hadi
paramsCMHadi, residsCMHadi, weightsCMHadi = chatterjeeMachlerHadi(X, yNoise, intercept=True)
# try and predict
yM = paramsM[0] + paramsM[1]*x1 + paramsM[2]*x2
yMM = paramsMM[0] + paramsMM[1]*x1 + paramsMM[2]*x2
yCM = paramsCM[0] + paramsCM[1]*x1 + paramsCM[2]*x2
yCM_mod = paramsModCM[0] + paramsModCM[1]*x1 + paramsModCM[2]*x2
yCM_Hadi = paramsCMHadi[0] + paramsCMHadi[1]*x1 + paramsCMHadi[2]*x2
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x1, x2, y, marker="s", color="black")
ax.scatter(x1Noise, x2Noise, yNoise, marker="*", s=50, color="goldenrod")
# plt.plot(x1, x2, zs=yM)
plt.plot(x1, x2, zs=yMM)
# plt.plot(x1, x2, zs=yCM)
plt.plot(x1, x2, zs=yCM_mod)
# plt.plot(x1, x2, zs=yCM_Hadi)
# plt.legend(["M estimate", "MM estimate", "chatterjeeMachler", "chatterjeeMachlerMod", "chatterjeeMachlerHadi"], loc="lower left")
plt.legend(["MM estimate", "chatterjeeMachlerMod"], loc="lower left")
plt.show()
#test_mestimate()
# test_mestimateModel()
# testRobustRegression()
testRobustRegression2D() | apache-2.0 |
joshsalvi/Misc | SelectAdmissionNote_McLean_Single_FINAL.py | 1 | 8201 |
# coding: utf-8
# # Extract text of Admit Notes
# #### Start with patient MRN and the Admission Date.
# #### This requires 4 sequential sql pulls:
# 1) Find the PatientEncounterID for the hospital admission.
# 2) Find the set of NoteIDs associated with the ZPatientID (PatientEncounterID is missing from the Notes table).
# 3) Find set of NoteIDs associated with ZPatientID AND date of admission.
# 4) Find the text associated with the NoteIDs.
# After getting the notes, isolate the Psychiatric Admission Note and the Admission H&P
#
# In[1]:
import base64
import os
import sqlalchemy
import getpass
import sys
import datetime
import pandas as pd
import re
import numpy as np
from scipy import stats
userid = getpass.getuser()
print(userid)
pswd = getpass.getpass('Provide password to connect:')
connection_str ="mssql+pymssql://PARTNERS\\" + str(userid) + ":" + pswd + "@PHSEDW.partners.org"
engine = sqlalchemy.create_engine(connection_str)
#does not establish DBAPI connection UNTIL engine.connect called
# Input MRN, Admit Date
# In[4]:
MRNFile=pd.read_csv('EPIC_GetNotesEDW.csv')
MRNFile.head()
MRN = MRNFile['MRN']
#MRN.to_csv('MRNString.csv', index=False)
#MRNAll = open('MRNString.csv', 'r')
#mrnNote = MRNAll.read()
#mrnNote = mrnNote.splitlines()
#AdmitDate = MRNFile['DateAdmit']
AdmitDate = '2018-08-24'
#AdmitDate.to_csv('DateAdmit.csv', index=False)
#admitDateNote = open('DateAdmit.csv', 'r')
#dateNote = admitDateNote.read()
#dateNote = dateNote.splitlines()
#Don't need any of above
#only do few patients at time
lengthMRN = len(MRN)
lengthAdmitDate = len(AdmitDate)
#test first patient of 1,277 patients
#MRNCurrList = [number for number in range(0,1)]
#dateCurrList = [number for number in range(0,1)]
##After ran script and obtained all notes, 1,255 left (excluded few who were past admits)
MRNCurrList = [number for number in range(308,309)]
#dateCurrList = [number for number in range(0,lengthAdmitDate)]
print(MRN[MRNCurrList])
#print(AdmitDate[dateCurrList])
print(AdmitDate)
# ## Since can't split for loop across multiple cells, will define steps:
# ### 1) Find ZPatient ID (MCL tables do not always have PatientEncounterID for the hospital admission, which would be ideal)
# ### 2) Two inner joins: use ZPatient ID to get Note ID's associated with patient (Note Table). Link NoteIDs to Date of Service (NoteText Table)
# ### 3) Each patient has many notes. Select only McLean (112) and date of admission (AdmitDate). Get rid of duplicate notes (with same NoteID). Each patient will still have multiple notes
# ### 4) Get notes corresponding to Note ID's. Search for CEC Admission Note
# In[5]:
#1) Find ZPatientID and PatientEncounterID
#set up the query;
for whichPt in MRNCurrList:
sql_string = """
SELECT
ptenchosp.PatientEncounterID,
id.PatientIdentityID,
id.PatientID,
ptenchosp.ExpectedAdmissionDTS
FROM
Epic.Encounter.PatientEncounterHospital_McLean AS ptenchosp
INNER JOIN
Epic.Patient.Identity_McLean AS id
ON
id.PatientID = ptenchosp.PatientID
WHERE
id.IdentityTypeID = 112
AND id.PatientIdentityID = '{}'
AND CAST(ptenchosp.HospitalAdmitDTS AS date) = '{}'
ORDER BY
ptenchosp.HospitalAdmitDTS
"""
#run the query, inserting the parameters into the query
with engine.connect() as cxn:
currMRN = MRN[whichPt]
currAdmitDate = AdmitDate
#currAdmitDate = AdmitDate[whichPt]
print(currMRN)
print(currAdmitDate)
PtEncounterID = pd.io.sql.read_sql(sql_string.format(currMRN, currAdmitDate), cxn)
#print(PtEncounterID)
#display a warning if there were multiple admissions; try taking this out
if len(PtEncounterID) > 1:
warn_string = 'Warning: More than one admission for {} on {}. Using most recent admission on that date.'
#print(warn_string.format(MRN, AdmitDate))
ZPatientID = PtEncounterID.iloc[0]['PatientID']
print(ZPatientID)
#pick out the PatientEncounterID
PtEncounterID = PtEncounterID.iloc[0]['PatientEncounterID'] #Use index 0 for first admission; -1 for last admission
PtEncounterID = int(PtEncounterID)
#print(PtEncounterID)
#2. Two inner joins: use ZPatient ID to get Note ID's associated with patient (Note Table).
#Link NoteIDs to Date of Service (NoteText Table)
#set up the query
sql_string2 = """
SELECT
notes.NoteID,
id.PatientID,
id.PatientIdentityID,
id.IdentityTypeID,
notetext.ContactDTS
FROM
Epic.Clinical.Note_McLean AS notes
INNER JOIN Epic.Patient.Identity_McLean AS id ON id.PatientID = notes.PatientLinkID
INNER JOIN Epic.Clinical.NoteText_McLean AS notetext ON notes.NoteID = notetext.NoteID
WHERE
notes.PatientLinkID = '{}'
ORDER BY
notes.NoteID
"""
#print(sql_string2)
#run the query, inserting the parameters into the query
with engine.connect() as cxn:
NoteID = pd.io.sql.read_sql(sql_string2.format(ZPatientID), cxn)
#found there were many duplicate NoteID's for some patients
#3. Convert to dataframe.
#Next use dataframe Note ID to select McLean notes (112) and date of admission (AdmitDate)
#Get rid of duplicates (keep first)
NoteIDFrame = pd.DataFrame(NoteID)
#get rid of non-McLean notes first
NoteIDFrame = NoteIDFrame.where(NoteIDFrame['IdentityTypeID'] == 112.0)
NoteIDFrame['ContactDTS'] = pd.to_datetime(NoteIDFrame['ContactDTS'], unit='s')
NoteIDFrame = NoteIDFrame.where(NoteIDFrame['ContactDTS'] == AdmitDate)
NoteIDFrame = NoteIDFrame.dropna()
NoteIDFrame = NoteIDFrame.drop_duplicates(subset='NoteID', keep='first')
#sort by Note ID
NoteIDFrame = NoteIDFrame.sort_values(by='NoteID')
#renumber indices, drop=True gets rid of old indices
NoteIDFrame = NoteIDFrame.reset_index(drop=True)
#print(NoteIDFrame)
#get list of note ID for patient
listNoteID = list(NoteIDFrame['NoteID'])
#determine number of notes for patient that occurred on Day of Admit
numberNotes = len(listNoteID)
#print(listNoteID)
#4) Get Notes corresponding to Note ID's
#set up the query
sql_string = """
SELECT
NoteTXT
FROM
Epic.Clinical.NoteText_McLean
WHERE
NoteID = '{}'
ORDER BY
LineNBR
"""
#print(sql_string)
#run the query, inserting the parameters into the query
#filename MRN_NoteID
#search each note for Medical Admission Note and Psychiatric Admission Note in first line
noteCounter = 0
for patientList in listNoteID:
if noteCounter < 6:
with engine.connect() as cxn:
NoteText = pd.io.sql.read_sql(sql_string.format(patientList), cxn)
fulltext = NoteText.NoteTXT.str.cat()
filename = [str(MRN[whichPt]) +'_' + str(patientList) +'.txt']
filename = "".join(filename)
#print(filename)
f = open(filename, 'w')
f.write(fulltext)
f.close()
f = open(filename, 'r')
CECnote = f.readline()
psychNote = re.findall('McLean Clinical Evaluation Center Psychiatric Admission Note', CECnote)
medNote = re.findall('CEC Medical Admission Note', CECnote)
if len(psychNote) > 0:
noteCounter = noteCounter + 1
psychFileName = ['PsychAdmit_' + str(MRN[whichPt]) + '.txt']
psychFileName = "".join(psychFileName)
print(psychFileName)
os.rename(filename, psychFileName)
#f = open(psychFileName, 'w')
#f.write(fulltext)
#f.close()
if len(medNote) > 0:
noteCounter = noteCounter + 1
medFileName = ['MedAdmit_' +str(MRN[whichPt]) +'.txt']
medFileName = "".join(medFileName)
print(medFileName)
os.rename(filename, medFileName)
#f = open(medFileName, 'w')
#f.write(fulltext)
#f.close()
| bsd-2-clause |
TheWeiTheTruthAndTheLight/senior-design | src/spark/main(ML).py | 1 | 4810 | import json
import pickle
from pprint import pprint
import re
from os import listdir, SEEK_END
import datetime
import random
from scipy.sparse import vstack
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
import nlp
import ml
import numpy as np
from dvs import DictVectorizerPartial
import scipy
import pyspark
from pyspark.sql import SQLContext
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, NaiveBayes
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.evaluation import BinaryClassificationMetrics
def filterComments(generator):
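    # Yields (is_sarcastic, feature_dict) pairs: comments of 10-120 characters
    # carrying an explicit sarcasm marker (/s, #sarcasm, </sarcasm>, ...) are
    # labelled True; roughly one randomly chosen unmarked comment per 1800 seen
    # is emitted as a False example (presumably to rebalance the classes).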
import nlp
import ml
list_re = [
r"(\/sarcasm)",
r"(<\/?sarcasm>)",
r"(#sarcasm)",
r"(\s*\/s\s*$)"
]
sarcasm_re = re.compile('|'.join(list_re))
pop = []
for comment in generator:
try:
text = comment['body'].lower()
if 10 <= len(text) <= 120:
if sarcasm_re.search(text) is not None:
yield (True, ml.flattenDict(nlp.feature(comment['body'], nlp.cleanTokensReddit)))
else:
pop.append(comment['body'])
if len(pop) == 1800:
yield (False, ml.flattenDict(nlp.feature(random.choice(pop), nlp.cleanTokensReddit)))
pop = []
except:
pass
def getVocab(gen):
for sarc, features in gen:
for key in features:
yield key
def v(gen, dv):
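    # Wrap each (label, feature dict) pair as an MLlib LabeledPoint, using the
    # shared DictVectorizerPartial to map the dict onto a fixed feature space.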
from pyspark.mllib.regression import LabeledPoint
for s, f in gen:
yield LabeledPoint(s, dv.transform([f]).tocsc().transpose())
def vectorize(gen, dv):
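    # Batch the incoming samples into blocks of `blocksize` and yield
    # (label list, sparse feature matrix) pairs for scikit-learn training.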
blocksize = 100000
sarclst = []
featlst = []
for sarc, features in gen:
sarclst.append(sarc)
featlst.append(features)
if len(sarclst) == blocksize:
yield (sarclst, dv.transform(featlst))
sarclst = []
featlst = []
yield (sarclst, dv.transform(featlst))
def train(gen):
for sarclst, matrix in gen:
y = np.array(sarclst)
X = matrix
result = ml.trainTest(X, y, classifiers=[LogisticRegression(n_jobs=-1)], reduce=0, splits=4, trainsize=0.8, testsize=0.2)
print result
yield result
def gerkin(gen):
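    # Pickle each training result so it can be stored with saveAsTextFile.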
for result in gen:
yield pickle.dumps(result)
if __name__=='__main__':
sc = pyspark.SparkContext()
sqlContext = SQLContext(sc)
df_rdd = sqlContext.read.format('json').load('/scratch/redditSarcasm/*')
print "Read df"
rdd = df_rdd.rdd
print "made rdd"
print "Reducing and transforming"
features = rdd.mapPartitions(filterComments)
print "Done reducing and transforming"
vocab = dict(features.mapPartitions(getVocab).distinct().zipWithIndex().collect())
print "Gathering Vocab"
dvp = DictVectorizerPartial(vocab=vocab)
if True:
vdvp = lambda gen: vectorize(gen, dvp)
csrs = features.mapPartitions(vdvp)
train, test = csrs.randomSplit([0.9,0.1])
mb = MultinomialNB()
for sarc, mat in train.collect():
mb.partial_fit(mat,sarc,classes=np.array([True,False]))
lsm = test.collect()
ls, lm = zip(*lsm)
testsarc = [item for sublist in ls for item in sublist]
testmatrix = vstack(lm)
        score = mb.score(testmatrix, testsarc)
print "Score:\t"+str(score)
sc.parallelize([pickle.dumps(mb)]).saveAsTextFile('/user/jfeinma00/mb'+str(score))
if False:
vdvp = lambda gen: vectorize(gen, dvp)
csrs = features.mapPartitions(vdvp)
print "Collecting and saving X y"
trained = csrs.mapPartitions(train)
dill = trained.mapPartitions(gerkin)
dill.saveAsTextFile('/user/jfeinma00/logistic')
if False:
topoints = lambda gen: v(gen, dvp)
labeledpoints = features.mapPartitions(topoints)
print "Created labeledpoints"
training, test = labeledpoints.randomSplit([0.8,0.2])
print "Split data into test and train"
model = LogisticRegressionWithLBFGS.train(training, iterations=3)
#model = NaiveBayes.train(training)
print "Trained model"
predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp.label))
print "Got prediction values for test set"
        accuracy = 1.0 * predictionAndLabels.filter(lambda (x, v): x == v).count() / test.count()
print('model accuracy {}'.format(accuracy))
metrics = BinaryClassificationMetrics(predictionAndLabels)
print("Area under PR = %s" % metrics.areaUnderPR)
print("Area under ROC = %s" % metrics.areaUnderROC)
model.save(sc, '/user/jfeinma00/lr%s'%str(accuracy))
        sc.parallelize(vocab.items()).saveAsTextFile('/user/jfeinma00/dvp')
| mit |
magnastrazh/NEUCOGAR | nest/serotonin/research/C/nest-2.10.0/pynest/nest/raster_plot.py | 12 | 6855 | # -*- coding: utf-8 -*-
#
# raster_plot.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import numpy
import pylab
def extract_events(data, time=None, sel=None):
"""
    Extracts all events within a given time interval or from a
given set of neurons.
- data is a matrix such that
data[:,0] is a vector of all gids and
data[:,1] a vector with the corresponding time stamps.
- time is a list with at most two entries such that
time=[t_max] extracts all events with t< t_max
time=[t_min, t_max] extracts all events with t_min <= t < t_max
- sel is a list of gids such that
sel=[gid1, ... , gidn] extracts all events from these gids.
All others are discarded.
Both time and sel may be used at the same time such that all
events are extracted for which both conditions are true.
"""
val = []
if time:
t_max = time[-1]
if len(time) > 1:
t_min = time[0]
else:
t_min = 0
for v in data:
t = v[1]
gid = v[0]
if time and (t < t_min or t >= t_max):
continue
if not sel or gid in sel:
val.append(v)
return numpy.array(val)
def from_data(data, title=None, hist=False, hist_binwidth=5.0, grayscale=False, sel=None):
"""
Plot raster from data array
"""
ts = data[:, 1]
d = extract_events(data, sel=sel)
ts1 = d[:, 1]
gids = d[:, 0]
return _make_plot(ts, ts1, gids, data[:, 0], hist, hist_binwidth, grayscale, title)
def from_file(fname, title=None, hist=False, hist_binwidth=5.0, grayscale=False):
"""
Plot raster from file
"""
if nest.is_iterable(fname):
data = None
for f in fname:
if data is None:
data = numpy.loadtxt(f)
else:
data = numpy.concatenate((data, numpy.loadtxt(f)))
else:
data = numpy.loadtxt(fname)
return from_data(data, title, hist, hist_binwidth, grayscale)
def from_device(detec, title=None, hist=False, hist_binwidth=5.0, grayscale=False, plot_lid=False):
"""
Plot raster from spike detector
"""
if not nest.GetStatus(detec)[0]["model"] == "spike_detector":
raise nest.NESTError("Please provide a spike_detector.")
if nest.GetStatus(detec, "to_memory")[0]:
ts, gids = _from_memory(detec)
if not len(ts):
raise nest.NESTError("No events recorded!")
if plot_lid:
gids = [nest.GetLID([x]) for x in gids]
if title is None:
title = "Raster plot from device '%i'" % detec[0]
if nest.GetStatus(detec)[0]["time_in_steps"]:
xlabel = "Steps"
else:
xlabel = "Time (ms)"
return _make_plot(ts, ts, gids, gids, hist, hist_binwidth, grayscale, title, xlabel)
elif nest.GetStatus(detec, "to_file")[0]:
fname = nest.GetStatus(detec, "filenames")[0]
return from_file(fname, title, hist, hist_binwidth, grayscale)
else:
raise nest.NESTError("No data to plot. Make sure that either to_memory or to_file are set.")
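# Illustrative usage (not part of the original module; assumes `sd` is a
# spike_detector node created with nest.Create and recording to memory):
#
#   import nest.raster_plot
#   nest.raster_plot.from_device(sd, hist=True, hist_binwidth=10.0)
#   nest.raster_plot.show()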
def _from_memory(detec):
ev = nest.GetStatus(detec, "events")[0]
return ev["times"], ev["senders"]
def _make_plot(ts, ts1, gids, neurons, hist, hist_binwidth, grayscale, title, xlabel=None):
"""
Generic plotting routine that constructs a raster plot along with
an optional histogram (common part in all routines above)
"""
pylab.figure()
if grayscale:
color_marker = ".k"
color_bar = "gray"
else:
color_marker = "."
color_bar = "blue"
color_edge = "black"
if xlabel is None:
xlabel = "Time (ms)"
ylabel = "Neuron ID"
if hist:
ax1 = pylab.axes([0.1, 0.3, 0.85, 0.6])
plotid = pylab.plot(ts1, gids, color_marker)
pylab.ylabel(ylabel)
pylab.xticks([])
xlim = pylab.xlim()
pylab.axes([0.1, 0.1, 0.85, 0.17])
t_bins = numpy.arange(numpy.amin(ts), numpy.amax(ts), float(hist_binwidth))
n, bins = _histogram(ts, bins=t_bins)
num_neurons = len(numpy.unique(neurons))
heights = 1000 * n / (hist_binwidth * num_neurons)
pylab.bar(t_bins, heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge)
pylab.yticks([int(x) for x in numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)])
pylab.ylabel("Rate (Hz)")
pylab.xlabel(xlabel)
pylab.xlim(xlim)
pylab.axes(ax1)
else:
plotid = pylab.plot(ts1, gids, color_marker)
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
if title is None:
pylab.title("Raster plot")
else:
pylab.title(title)
pylab.draw()
return plotid
def _histogram(a, bins=10, bin_range=None, normed=False):
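    # Helper similar to numpy.histogram: returns per-bin counts (densities when
    # normed=True) together with the bin positions, processing the data in
    # blocks to keep memory use modest for long spike trains.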
from numpy import asarray, iterable, linspace, sort, concatenate
a = asarray(a).ravel()
if bin_range is not None:
mn, mx = bin_range
if mn > mx:
raise ValueError("max must be larger than min in range parameter")
if not iterable(bins):
if bin_range is None:
bin_range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in bin_range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins, endpoint=False)
else:
if (bins[1:] - bins[:-1] < 0).any():
raise ValueError("bins must increase monotonically")
# best block size probably depends on processor cache size
block = 65536
n = sort(a[:block]).searchsorted(bins)
for i in range(block, a.size, block):
n += sort(a[i:i + block]).searchsorted(bins)
n = concatenate([n, [len(a)]])
n = n[1:] - n[:-1]
if normed:
db = bins[1] - bins[0]
return 1.0 / (a.size * db) * n, bins
else:
return n, bins
def show():
"""
Call pylab.show() to show all figures and enter the GUI main loop.
Python will block until all figure windows are closed again.
You should call this function only once at the end of a script.
See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
"""
pylab.show()
| gpl-2.0 |
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam | PyFoam/Basics/RunDatabase.py | 1 | 10646 | # ICE Revision: $Id: $
"""
Collects data about runs in a small SQLite database
"""
# don't look at it too closely. It's my first sqlite-code
import sqlite3
from os import path
import datetime
import re
import sys
from PyFoam.Error import error
from .CSVCollection import CSVCollection
from PyFoam.ThirdParty.six import print_,iteritems,integer_types
from PyFoam.ThirdParty.six import u as uniCode
class RunDatabase(object):
"""
Database with information about runs. To be queried etc
"""
separator="//"
def __init__(self,
name,
create=False,
verbose=False):
"""@param name: name of the file
@param create: should the database be created if it does not exist"""
self.verbose=verbose
if not path.exists(name):
if create==False:
error("Database",name,"does not exist")
else:
self.initDatabase(name)
self.db=sqlite3.connect(name)
self.db.row_factory=sqlite3.Row
def initDatabase(self,name):
"""Create a new database file"""
db=sqlite3.connect(name)
with db:
db.row_factory=sqlite3.Row
cursor=db.cursor()
cursor.execute("CREATE TABLE theRuns(runId INTEGER PRIMARY KEY, "+
self.__normalize("insertionTime")+" TIMESTAMP)")
cursor.close()
def add(self,data):
"""Add a dictionary with data to the database"""
self.__adaptDatabase(data)
runData=dict([("insertionTime",datetime.datetime.now())]+ \
[(k,v) for k,v in iteritems(data) if type(v)!=dict])
runID=self.__addContent("theRuns",runData)
subtables=dict([(k,v) for k,v in iteritems(data) if type(v)==dict])
for tn,content in iteritems(subtables):
self.__addContent(tn+"Data",
dict(list(self.__flattenDict(content).items())+
[("runId",runID)]))
self.db.commit()
specialChars={
'[':'bro',
']':'brc',
'{':'cro',
'}':'crc',
'(':'pro',
')':'prc',
'|':'pip',
}
specialString="_specialChar"
def __normalize(self,s):
"""Normalize a column-name so that the case-insensitve column-names of SQlite
are no problem"""
if s in ["runId","dataId"]:
return s
result=""
for c in s:
if c.isupper() or c=="_":
result+="_"+c.lower()
elif c in RunDatabase.specialChars:
result+=RunDatabase.specialString+RunDatabase.specialChars[c]
else:
result+=c
return result
def __denormalize(self,s):
"""Denormalize the column name that was normalized by _normalize"""
while s.find(RunDatabase.specialString)>=0:
pre,post=s.split(RunDatabase.specialString,maxsplit=1)
spec=post[0:3]
for k,v in iteritems(RunDatabase.specialChars):
if spec==v:
s=pre+k+post[3:]
break
else:
error("No special character for encoding",spec,"found")
result=""
underFound=False
for c in s:
if underFound:
underFound=False
result+=c.upper()
elif c=="_":
underFound=True
else:
result+=c
if underFound:
error("String",s,"was not correctly encoded")
return result
def __addContent(self,table,data):
cursor=self.db.cursor()
runData={}
for k,v in iteritems(data):
if k=="runId":
runData[k]=v
elif isinstance(v,integer_types+(float,)):
runData[k]=float(v)
else:
runData[k]=uniCode(str(v))
cols=self.__getColumns(table)[1:]
addData=[]
for c in cols:
try:
addData.append(runData[c])
except KeyError:
addData.append(None)
addData=tuple(addData)
cSQL = "insert into "+table+" ("+ \
",".join(['"'+self.__normalize(c)+'"' for c in cols])+ \
") values ("+",".join(["?"]*len(addData))+")"
if self.verbose:
print_("Execute SQL",cSQL,"with",addData)
try:
cursor.execute(cSQL, addData)
except Exception:
e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'
print_("SQL-Expression:",cSQL)
print_("AddData:",addData)
raise e
lastrow=cursor.lastrowid
cursor.close()
return lastrow
def __adaptDatabase(self,data):
"""Make sure that all the required columns and tables are there"""
c=self.db.execute('SELECT name FROM sqlite_master WHERE type = "table"')
tables=[ x["name"] for x in c.fetchall() ]
indata=dict([(k,v) for k,v in iteritems(data) if type(v)!=dict])
subtables=dict([(k,v) for k,v in iteritems(data) if type(v)==dict])
self.__addColumnsToTable("theRuns",indata)
for tn,content in iteritems(subtables):
if tn+"Data" not in tables:
if self.verbose:
print_("Adding table",tn)
self.db.execute("CREATE TABLE "+tn+"Data (dataId INTEGER PRIMARY KEY, runId INTEGER)")
self.__addColumnsToTable(tn+"Data",
self.__flattenDict(content))
def __flattenDict(self,oData,prefix=""):
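        # Flatten nested dictionaries into a single level, joining keys with
        # the class separator, e.g. {'a': {'b': 1}} -> {'a//b': 1}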
data=[(prefix+k,v) for k,v in iteritems(oData) if type(v)!=dict]
subtables=dict([(k,v) for k,v in iteritems(oData) if type(v)==dict])
for name,val in iteritems(subtables):
data+=list(self.__flattenDict(val,prefix+name+self.separator).items())
if self.verbose:
print_("Flattened",oData,"to",data)
return dict(data)
def __getColumns(self,tablename):
c=self.db.execute('SELECT * from '+tablename)
result=[]
for desc in c.description:
if desc[0] in ['dataId','runId']:
result.append(desc[0])
else:
result.append(self.__denormalize(desc[0]))
return result
def __addColumnsToTable(self,table,data):
columns=self.__getColumns(table)
for k,v in iteritems(data):
if k not in columns:
if self.verbose:
print_("Adding:",k,"to",table,"(normalized:",
self.__normalize(k),")")
if isinstance(v,integer_types+(float,)):
self.db.execute('ALTER TABLE "%s" ADD COLUMN "%s" REAL' %
(table,self.__normalize(k)))
else:
self.db.execute('ALTER TABLE "%s" ADD COLUMN "%s" TEXT' %
(table,self.__normalize(k)))
def dumpToCSV(self,
fname,
selection=None,
disableRunData=None,
pandasFormat=True,
excel=False):
"""Dump the contents of the database to a csv-file
@param name: the CSV-file
@param selection: list of regular expressions. Only data
entries fitting those will be added to the CSV-file (except
for the basic run). If unset all data will be written"""
file=CSVCollection(fname)
runCursor=self.db.cursor()
runCursor.execute("SELECT * from theRuns")
c=self.db.execute('SELECT name FROM sqlite_master WHERE type = "table"')
tables=[ x["name"] for x in c.fetchall() ]
allData=set()
writtenData=set()
disabledStandard=set()
for d in runCursor:
id=d['runId']
if self.verbose:
print_("Dumping run",id)
for k in list(d.keys()):
writeEntry=True
if disableRunData:
for e in disableRunData:
exp=re.compile(e)
if not exp.search(self.__denormalize(k)) is None:
writeEntry=False
break
if writeEntry:
file[k]=d[k]
else:
disabledStandard.add(k)
for t in tables:
if t=="theRuns":
namePrefix="runInfo"
else:
namePrefix=t[:-4]
dataCursor=self.db.cursor()
dataCursor.execute("SELECT * FROM "+t+" WHERE runId=?",
(str(id),))
data=dataCursor.fetchall()
if len(data)>1:
error(len(data),"data items found for id ",id,
"in table",t,".Need exactly 1")
elif len(data)<1:
continue
for k in list(data[0].keys()):
if k in ["dataId","runId"]:
continue
if k in disabledStandard:
continue
name=namePrefix+self.separator+self.__denormalize(k)
allData.add(name)
writeEntry=True
if selection:
writeEntry=False
for e in selection:
exp=re.compile(e)
if exp.search(name):
writeEntry=True
break
if writeEntry:
writtenData.add(name)
file[name]=data[0][k]
file.write()
if self.verbose:
sep="\n "
if allData==writtenData:
print_("Added all data entries:",sep,sep.join(sorted(allData)),sep="")
else:
print_("Added parameters:",sep,sep.join(sorted(writtenData)),
"\nUnwritten data:",sep,sep.join(sorted(allData-writtenData)),sep="")
if len(disabledStandard)>0:
print_("Disabled standard entries:",sep,sep.join(sorted(disabledStandard)),sep="")
f=file(pandasFormat)
if excel:
file(True).to_excel(fname)
if not f is None:
return f
else:
# retry by forcing to numpy
return file(False)
# Should work with Python3 and Python2
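# Illustrative usage (hypothetical file names, not part of the original module):
#
#   db = RunDatabase("runs.db", create=True)
#   db.add({"solver": "simpleFoam", "timing": {"wallTime": 123.4}})
#   db.dumpToCSV("runs.csv")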
| gpl-2.0 |
deepchem/deepchem | deepchem/metrics/__init__.py | 3 | 1950 | # flake8: noqa
# metric class
from deepchem.metrics.metric import Metric
# metrics utils
from deepchem.metrics.metric import threshold_predictions
from deepchem.metrics.metric import normalize_weight_shape
from deepchem.metrics.metric import normalize_labels_shape
from deepchem.metrics.metric import normalize_prediction_shape
from deepchem.metrics.metric import handle_classification_mode
from deepchem.metrics.metric import to_one_hot
from deepchem.metrics.metric import from_one_hot
# sklearn & scipy score function
from deepchem.metrics.score_function import matthews_corrcoef
from deepchem.metrics.score_function import recall_score
from deepchem.metrics.score_function import kappa_score
from deepchem.metrics.score_function import cohen_kappa_score
from deepchem.metrics.score_function import r2_score
from deepchem.metrics.score_function import mean_squared_error
from deepchem.metrics.score_function import mean_absolute_error
from deepchem.metrics.score_function import precision_score
from deepchem.metrics.score_function import precision_recall_curve
from deepchem.metrics.score_function import auc
from deepchem.metrics.score_function import jaccard_score
from deepchem.metrics.score_function import f1_score
from deepchem.metrics.score_function import roc_auc_score
from deepchem.metrics.score_function import accuracy_score
from deepchem.metrics.score_function import balanced_accuracy_score
from deepchem.metrics.score_function import pearsonr
# original score function
from deepchem.metrics.score_function import pearson_r2_score
from deepchem.metrics.score_function import jaccard_index
from deepchem.metrics.score_function import pixel_error
from deepchem.metrics.score_function import prc_auc_score
from deepchem.metrics.score_function import rms_score
from deepchem.metrics.score_function import mae_score
from deepchem.metrics.score_function import bedroc_score
from deepchem.metrics.score_function import concordance_index
| mit |
codrut3/tensorflow | tensorflow/python/estimator/canned/dnn_linear_combined_test.py | 46 | 26964 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)
def _dnn_only_model_fn(self,
features,
labels,
mode,
head,
hidden_units,
feature_columns,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined._dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=[],
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
# A function to mimic linear-regressor init reuse same tests.
def _linear_regressor_fn(feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyRegressorPartitionerTest(
linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorEvaluationTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorIntegrationTest(
linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorTrainingTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn)
def _linear_classifier_fn(feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyClassifierTrainingTest(
linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierClassesEvaluationTest(
linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierPredictTest(
linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierIntegrationTest(
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class DNNLinearCombinedRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
# A function to mimic dnn-classifier init reuse same tests.
def _dnn_classifier_fn(hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
# A function to mimic dnn-regressor init reuse same tests.
def _dnn_regressor_fn(hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 2
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes-1., batch_size * input_dimension,
dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNLinearCombinedTests(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, real_optimizer, var_name_prefix):
"""Verifies global_step is None and var_names start with given prefix."""
def _minimize(loss, global_step=None, var_list=None):
self.assertIsNone(global_step)
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
var_names = [var.name for var in trainable_vars]
self.assertTrue(
all([name.startswith(var_name_prefix) for name in var_names]))
# var is used to check this op called by training.
with ops.name_scope(''):
var = variables_lib.Variable(0., name=(var_name_prefix + '_called'))
with ops.control_dependencies([var.assign(100.)]):
return real_optimizer.minimize(loss, global_step, var_list)
optimizer_mock = test.mock.NonCallableMagicMock(
spec=optimizer_lib.Optimizer, wraps=real_optimizer)
optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)
return optimizer_mock
def test_train_op_calls_both_dnn_and_linear(self):
opt = gradient_descent.GradientDescentOptimizer(1.)
x_column = feature_column.numeric_column('x')
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[0.], [1.]])},
y=np.array([[0.], [1.]]),
batch_size=1,
shuffle=False)
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[x_column],
# verifies linear_optimizer is used only for linear part.
linear_optimizer=self._mock_optimizer(opt, 'linear'),
dnn_hidden_units=(2, 2),
dnn_feature_columns=[x_column],
# verifies dnn_optimizer is used only for linear part.
dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
model_dir=self._model_dir)
est.train(input_fn, steps=1)
# verifies train_op fires linear minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'linear_called'))
# verifies train_op fires dnn minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'dnn_called'))
def test_dnn_and_linear_logits_are_added(self):
with ops.Graph().as_default():
variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
variables_lib.Variable([6.0], name='dnn/logits/bias')
variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
linear_testing_utils.save_variables_to_ckpt(self._model_dir)
x_column = feature_column.numeric_column('x')
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[x_column],
dnn_hidden_units=[1],
dnn_feature_columns=[x_column],
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
# linear logits = 10*1 + 2 = 12
# dnn logits = (10*3 + 4)*5 + 6 = 176
# logits = dnn + linear = 176 + 12 = 188
self.assertAllClose(
{
prediction_keys.PredictionKeys.PREDICTIONS: [188.],
},
next(est.predict(input_fn=input_fn)))
if __name__ == '__main__':
test.main()
| apache-2.0 |
fernandezcuesta/pySMSCMon | test/unit_tests/test_dftools.py | 2 | 11466 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
*t4mon* - T4 monitoring **test functions** for df_tools.py
"""
from __future__ import absolute_import
import tempfile
import unittest
import numpy as np
import pandas as pd
import pytest
from t4mon import df_tools, collector
from pandas.util.testing import assert_frame_equal
from . import base
TEST_CSV_SHAPE = (286, 930) # dataframe shape as generated from CSV
TEST_PKL_SHAPE = (286, 942) # dataframe shape after calculations (as stored)
TEST_PLAINCSV = 'test/plain_data.csv'
class TestAuxiliaryFunctions(unittest.TestCase):
""" Test auxiliary functions, do not require setting anything up
"""
@classmethod
def setUpClass(cls):
collector.add_methods_to_pandas_dataframe(base.LOGGER)
def test_reload_from_csv(self):
""" Test loading a dataframe from CSV file (plain or T4 format)
"""
# Test with a T4-CSV
df1 = df_tools.reload_from_csv(base.TEST_CSV)
self.assertTupleEqual(df1.shape, TEST_CSV_SHAPE)
# Test with a plain CSV
with tempfile.NamedTemporaryFile() as plaincsv:
df1.to_csv(plaincsv.name)
plaincsv.file.close()
df2 = df_tools.reload_from_csv(plaincsv.name, plain=True)
assert_frame_equal(df1, df2)
def test_t4csv_to_plain(self):
""" Test T4 to plain CSV conversion """
with tempfile.NamedTemporaryFile() as test_plain:
df_tools.t4csv_to_plain(base.TEST_CSV, output=test_plain.name)
with open(TEST_PLAINCSV, 'r') as this, open(test_plain.name,
'r') as that:
self.assertTrue(this.read(), that.read())
def test_plain_to_t4csv(self):
""" Test conversion from plain to T4-CSV """
with tempfile.NamedTemporaryFile() as that:
df_tools.plain_to_t4csv(TEST_PLAINCSV, output=that.name)
with open(TEST_PLAINCSV, 'r') as this:
self.assertTrue(this.read(), that.read())
@pytest.mark.xfail(reason='Statistically possible to fail, random numbers')
def test_remove_outliers(self):
""" Test removing outliers from a dataframe """
df1 = pd.DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df2 = df1.copy()
# Add some outliers in random positions
a = range(len(df2))
np.random.shuffle(a)
rnd_row = a[:1]
a = df2.columns.values
rnd_col = a[:3]
df2.update(df2[rnd_col].ix[rnd_row] * 99.0)
df2 = df_tools.remove_outliers(df2, n_std=2)
assert_frame_equal(df1.drop(rnd_row, axis=0), df2)
class TestDFTools(base.BaseTestClass):
""" Set of test functions for df_tools.py """
def test_extract_t4csv(self):
""" Test function for _extract_t4csv """
with open(base.TEST_CSV, 'r') as filedescriptor:
(fields, data) = df_tools._extract_t4csv(filedescriptor)
self.assertIsInstance(fields, list)
self.assertIsInstance(data, list)
self.assertEqual(len(fields), len(data[0].split(df_tools.SEPARATOR)))
self.assertEquals(set([len(fields)]),
set([len(row.split(df_tools.SEPARATOR))
for row in data]))
# Specific to this particular test file
self.assertIn('Counter07_HANDOVER_RQST', fields)
self.assertIn('Sample Time', fields)
self.assertIn('[DISK_BCK0]%Used', fields)
self.assertIn('Counter01_message_External_Failure', fields)
def test_select(self):
""" Test function for select """
# Extract non existing -> empty
self.assertTrue(df_tools.select(self.test_data,
'NONEXISTING_COLUMN',
logger=self.logger).empty)
# Extract none -> original
assert_frame_equal(self.test_data, df_tools.select(self.test_data,
'',
logger=self.logger))
# Extract none, filtering by a non-existing system
assert_frame_equal(pd.DataFrame(), df_tools.select(self.test_data,
system='BAD_ID',
logger=self.logger))
# Extract filtering by an existing system
self.assertTupleEqual(df_tools.select(self.test_data,
system='SYSTEM_1',
logger=self.logger).shape,
TEST_PKL_SHAPE)
# Extract an empty DF should return empty DF
assert_frame_equal(pd.DataFrame(), df_tools.select(pd.DataFrame(),
logger=self.logger))
# Specific for test data
self.assertEqual(df_tools.select(self.test_data,
'Above_Peek',
logger=self.logger).shape[1],
12)
self.assertEqual(df_tools.select(self.test_data,
'Counter0',
logger=self.logger).shape[1],
382)
# Bad additional filter returns empty dataframe
assert_frame_equal(df_tools.select(self.test_data,
'Above_Peek',
position='UP', # wrong filter
logger=self.logger),
pd.DataFrame())
# When a wrong variable is selected, it is ignored
self.assertEqual(df_tools.select(self.test_data,
'I_do_not_exist',
'Above_Peek',
logger=self.logger).shape[1],
12)
def test_todataframe(self):
""" Test function for to_dataframe """
with open(base.TEST_CSV, 'r') as testcsv:
(field_names, data) = df_tools._extract_t4csv(testcsv)
dataframe = df_tools.to_dataframe(field_names, data)
self.assertIsInstance(dataframe, pd.DataFrame)
self.assertTupleEqual(dataframe.shape, TEST_CSV_SHAPE)
# Missing header should return an empty DF
self.assertTrue(df_tools.to_dataframe([], data).empty)
        # Missing data should return an empty DF
self.assertTrue(df_tools.to_dataframe(field_names, []).empty)
my_df = df_tools.to_dataframe(['COL1', 'My Sample Time'],
['7, 2000-01-01 00:00:01',
'23, 2000-01-01 00:01:00',
'30, 2000-01-01 00:01:58'])
self.assertEqual(my_df['COL1'].sum(), 60)
self.assertIsInstance(my_df.index, pd.DatetimeIndex)
def test_todataframe_raises_exception_if_no_datetime_column_found(self):
"""
Test to_dataframe when a no header passed matching the datetime tag
"""
with open(base.TEST_CSV, 'r') as testcsv:
(field_names, data) = df_tools._extract_t4csv(testcsv)
# fake the header
df_timecol = next(s for s in field_names if
df_tools.DATETIME_TAG in s)
field_names[field_names.index(df_timecol)] = 'time_index'
with self.assertRaises(df_tools.ToDfError):
df_tools.to_dataframe(field_names, data)
def test_dataframize(self):
""" Test function for dataframize """
dataframe = df_tools.dataframize(base.TEST_CSV, logger=self.logger)
self.assertTupleEqual(dataframe.shape, TEST_CSV_SHAPE)
# test with a non-T4Format CSV, should return empty DF
with tempfile.NamedTemporaryFile() as plaincsv:
dataframe.to_csv(plaincsv.name)
plaincsv.file.close()
assert_frame_equal(pd.DataFrame(),
df_tools.dataframize(plaincsv.name))
# test when file does not exist
assert_frame_equal(pd.DataFrame(),
df_tools.dataframize('non-existing-file'))
def test_consolidate_data(self):
""" Test dataframe consolidation function """
midx = pd.MultiIndex(levels=[[0, 1, 2, 3, 4], ['sys1']],
labels=[[0, 1, 2, 3, 4], [0, 0, 0, 0, 0]],
names=[df_tools.DATETIME_TAG, 'system'])
df1 = pd.DataFrame(np.random.randint(0, 10, (5, 3)),
columns=['A', 'B', 'C'])
df1.index.name = df_tools.DATETIME_TAG
df1_midx = df1.set_index(midx)
df2 = pd.DataFrame(np.random.randint(0, 10, (5, 3)),
columns=['A', 'B', 'C'])
df2.index.name = df_tools.DATETIME_TAG
# Consolidate with nothing should raise a ToDfError
with self.assertRaises(df_tools.ToDfError):
df_tools.consolidate_data(df1)
# Consolidate with a system name should return MultiIndex dataframe
assert_frame_equal(df_tools.consolidate_data(df1.copy(),
system='sys1'),
df1_midx)
data = pd.DataFrame()
for (i, partial_dataframe) in enumerate([df1, df2]):
data = df_tools.consolidate_data(partial_dataframe,
dataframe=data,
system='sys{}'.format(i + 1))
self.assertTupleEqual(data.shape, (10, 3))
self.assertTupleEqual(data.index.levshape, (5, 2))
assert_frame_equal(df1, data.xs('sys1', level='system'))
assert_frame_equal(df2, data.xs('sys2', level='system'))
def test_dataframe_to_t4csv(self):
""" Test reverse conversion (dataframe to T4-CSV) """
t = tempfile.NamedTemporaryFile()
t4files = df_tools.dataframe_to_t4csv(dataframe=self.test_data,
output=t.name)
self.assertTrue(len(t4files) > 0)
that = self.collector_test.get_stats_from_host(list(t4files.values()))
that = df_tools.consolidate_data(that, system=list(t4files.keys())[0])
assert_frame_equal(self.test_data, that)
def test_remove_duplicate_columns(self):
""" Test removing duplicate columns from a dataframe """
df1 = pd.DataFrame(np.random.randint(0, 10, (5, 5)),
columns=list('ABCBE'))
df2 = df_tools.remove_duplicate_columns(df1)
self.assertTupleEqual(df2.shape, (5, 4))
# When no duplicates should not alter the dataframe
assert_frame_equal(df2, df_tools.remove_duplicate_columns(df2))
def test_get_matching_columns(self):
"""
Test getting a list of columns based on regex and exclusion lists
"""
df1 = pd.DataFrame(np.random.randint(0, 10, (2, 9)),
columns=['one', 'two', 'one two', 'four',
'five', 'six', 'one six', 'eight',
'four five'])
self.assertEqual(['one', 'one two'],
df_tools.get_matching_columns(df1, 'one',
excluded='six'))
| mit |
apache/airflow | tests/providers/snowflake/transfers/test_snowflake_to_slack.py | 3 | 3577 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.models import DAG
from airflow.providers.snowflake.transfers.snowflake_to_slack import SnowflakeToSlackOperator
from airflow.utils import timezone
TEST_DAG_ID = 'snowflake_to_slack_unit_test'
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestSnowflakeToSlackOperator(unittest.TestCase):
def setUp(self):
self.example_dag = DAG('unit_test_dag_snowflake_to_slack', start_date=DEFAULT_DATE)
@staticmethod
def _construct_operator(**kwargs):
operator = SnowflakeToSlackOperator(task_id=TEST_DAG_ID, **kwargs)
return operator
@mock.patch('airflow.providers.snowflake.transfers.snowflake_to_slack.SnowflakeHook')
@mock.patch('airflow.providers.snowflake.transfers.snowflake_to_slack.SlackWebhookHook')
def test_hooks_and_rendering(self, mock_slack_hook_class, mock_snowflake_hook_class):
operator_args = {
'snowflake_conn_id': 'snowflake_connection',
'slack_conn_id': 'slack_connection',
'sql': "sql {{ ds }}",
'results_df_name': 'xxxx',
'warehouse': 'test_warehouse',
'database': 'test_database',
'role': 'test_role',
'schema': 'test_schema',
'parameters': ['1', '2', '3'],
'slack_message': 'message: {{ ds }}, {{ xxxx }}',
'slack_token': 'test_token',
'dag': self.example_dag,
}
snowflake_to_slack_operator = self._construct_operator(**operator_args)
snowflake_hook = mock_snowflake_hook_class.return_value
snowflake_hook.get_pandas_df.return_value = '1234'
slack_webhook_hook = mock_slack_hook_class.return_value
snowflake_to_slack_operator.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# Test that the Snowflake hook is instantiated with the right parameters
mock_snowflake_hook_class.assert_called_once_with(
database='test_database',
role='test_role',
schema='test_schema',
snowflake_conn_id='snowflake_connection',
warehouse='test_warehouse',
)
# Test that the get_pandas_df method is executed on the Snowflake hook with the pre-rendered sql and
# correct params
snowflake_hook.get_pandas_df.assert_called_once_with('sql 2017-01-01', parameters=['1', '2', '3'])
# Test that the Slack hook is instantiated with the right parameters
mock_slack_hook_class.assert_called_once_with(
http_conn_id='slack_connection', message='message: 2017-01-01, 1234', webhook_token='test_token'
)
# Test that the Slack hook's execute method gets run once
slack_webhook_hook.execute.assert_called_once()
| apache-2.0 |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/indexes/datetimes/test_join.py | 5 | 4768 | from datetime import datetime
import numpy as np
import pytest
from pandas import DatetimeIndex, Index, Timestamp, date_range, to_datetime
import pandas._testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd
class TestJoin:
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(
10,
10,
data_gen_f=lambda *args, **kwargs: np.random.randn(),
r_idx_type="i",
c_idx_type="dt",
)
cols = df.columns.join(df.index, how="outer")
joined = cols.join(df.columns)
assert cols.dtype == np.dtype("O")
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_join_self(self, join_type):
index = date_range("1/1/2000", periods=10)
joined = index.join(index, how=join_type)
assert index is joined
def test_join_with_period_index(self, join_type):
df = tm.makeCustomDataframe(
10,
10,
data_gen_f=lambda *args: np.random.randint(2),
c_idx_type="p",
r_idx_type="dt",
)
s = df.iloc[:5, 0]
expected = df.columns.astype("O").join(s.index, how=join_type)
result = df.columns.join(s.index, how=join_type)
tm.assert_index_equal(expected, result)
def test_join_object_index(self):
rng = date_range("1/1/2000", periods=10)
idx = Index(["a", "b", "c", "d"])
result = rng.join(idx, how="outer")
assert isinstance(result[0], Timestamp)
def test_join_utc_convert(self, join_type):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
left = rng.tz_convert("US/Eastern")
right = rng.tz_convert("Europe/Berlin")
result = left.join(left[:-5], how=join_type)
assert isinstance(result, DatetimeIndex)
assert result.tz == left.tz
result = left.join(right[:-5], how=join_type)
assert isinstance(result, DatetimeIndex)
assert result.tz.zone == "UTC"
def test_datetimeindex_union_join_empty(self, sort):
dti = date_range(start="1/1/2001", end="2/1/2001", freq="D")
empty = Index([])
result = dti.union(empty, sort=sort)
expected = dti.astype("O")
tm.assert_index_equal(result, expected)
result = dti.join(empty)
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, dti)
def test_join_nonunique(self):
idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"])
idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"])
rs = idx1.join(idx2, how="outer")
assert rs.is_monotonic
@pytest.mark.parametrize("freq", ["B", "C"])
def test_outer_join(self, freq):
# should just behave as union
start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)
rng = date_range(start=start, end=end, freq=freq)
# overlapping
left = rng[:10]
right = rng[5:10]
the_join = left.join(right, how="outer")
assert isinstance(the_join, DatetimeIndex)
# non-overlapping, gap in middle
left = rng[:5]
right = rng[10:]
the_join = left.join(right, how="outer")
assert isinstance(the_join, DatetimeIndex)
assert the_join.freq is None
# non-overlapping, no gap
left = rng[:5]
right = rng[5:10]
the_join = left.join(right, how="outer")
assert isinstance(the_join, DatetimeIndex)
# overlapping, but different offset
other = date_range(start, end, freq=BMonthEnd())
the_join = rng.join(other, how="outer")
assert isinstance(the_join, DatetimeIndex)
assert the_join.freq is None
def test_naive_aware_conflicts(self):
start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)
naive = date_range(start, end, freq=BDay(), tz=None)
aware = date_range(start, end, freq=BDay(), tz="Asia/Hong_Kong")
msg = "tz-naive.*tz-aware"
with pytest.raises(TypeError, match=msg):
naive.join(aware)
with pytest.raises(TypeError, match=msg):
aware.join(naive)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_join_preserves_freq(self, tz):
# GH#32157
dti = date_range("2016-01-01", periods=10, tz=tz)
result = dti[:5].join(dti[5:], how="outer")
assert result.freq == dti.freq
tm.assert_index_equal(result, dti)
result = dti[:5].join(dti[6:], how="outer")
assert result.freq is None
expected = dti.delete(5)
tm.assert_index_equal(result, expected)
| gpl-2.0 |
IQuOD/AutoQC | util/benchmarks.py | 3 | 4332 | import util.combineTests as combinatorics
import matplotlib.pyplot as plt
import numpy as np
def compare_to_truth(combos, trueResult):
'''Given the results from all the possible combinations of tests (combos)
and a set of truth results (trueResult), the false positive rate and the
    true positive rate (the rate at which the combination of tests incorrectly
    rejects a profile or correctly rejects a profile, respectively) are
calculated (in %).
Results are returned in an identical set of lists to combos except the
quality control results are replaced by the false and true positive
rates.
'''
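    # Worked example (hypothetical numbers, for illustration only): with
    # trueResult = [True, False, True, False] and a combo whose results are
    # [True, True, False, False], the counts are tt = tf = ft = ff = 1, so
    # falsePositiveRate = 1 / (1 + 1) * 100 = 50.0 and
    # truePositiveRate = 1 / (1 + 1) * 100 = 50.0.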
benchmarks = []
for combo in combos:
assert len(combo[1]) == len(trueResult), 'Different number of true results and estimated results reported.'
tt = 0.0
tf = 0.0
ft = 0.0
ff = 0.0
for i in range(len(trueResult)):
if combo[1][i] == True and trueResult[i] == True:
tt += 1
elif combo[1][i] == True and trueResult[i] == False:
tf += 1
elif combo[1][i] == False and trueResult[i] == True:
ft += 1
else:
ff += 1
        assert (tf + ff) > 0, 'No profiles that should be accepted are available'
        assert (tt + ft) > 0, 'No profiles that should be rejected are available'
falsePositiveRate = tf / (tf + ff) * 100.0
truePositiveRate = tt / (tt + ft) * 100.0
benchmarks.append([combo[0],
[falsePositiveRate, truePositiveRate],
combo[2]])
return benchmarks
def plot_roc(bmResults):
'''Plots the results generated by compare_to_truth and also prints
the results to screen.
The plot shows the true positive rate (the percentage of profiles that
should be rejected that actually are rejected) against the false
positive rate (the percentage of profiles that should not have been
rejected but were). Ideally we would like a true positive rate of 100%
and a false positive rate of 0%. The combinations that yield the lowest
false positive rate are shown in red; the highest true positive rate
combinations in blue and the closest to the ideal of 100% true positive
rate and 0% false positive rate in green. Other combinations are shown
by black crosses.
'''
# Results with the highest true positive rate, lowest false positive rate
# and the closest to tpr = 100, fpr = 0 are highlighted.
fpr = np.array([bmResults[i][1][0] for i in range(len(bmResults))])
tpr = np.array([bmResults[i][1][1] for i in range(len(bmResults))])
dists = np.sqrt(fpr**2 + (100.0 - tpr)**2)
lFpr = fpr == np.min(fpr[tpr > 0]) # Ignore if no good rejects are made.
    lTpr = tpr == np.max(tpr[fpr < 100]) # Ignore if all good profiles are rejected.
lDist = dists == np.min(dists)
# Select the combinations using the fewest tests.
nComb = []
for bm in bmResults:
nComb.append(np.sum([len(sublist) for sublist in bm[0]]))
nComb = np.array(nComb)
lFpr = lFpr & (nComb == np.min(nComb[lFpr]))
lTpr = lTpr & (nComb == np.min(nComb[lTpr]))
lDist = lDist & (nComb == np.min(nComb[lDist]))
for num, bm in enumerate(bmResults):
printout = False
c = 'xk'
if lFpr[num]:
c = 'or'
print('*** Lowest false positive rate ***')
printout = True
if lTpr[num]:
c = 'ob'
print('*** Highest true positive rate ***')
printout = True
if lDist[num]:
c = 'og'
print('*** Closest to FPR = 0 and TPR = 100 ***')
printout = True
if printout:
print(' Combination ' + str(num+1) + ':')
print(' ' + combinatorics.combinationStr(bm[0], bm[2]))
print(' False Positive Rate = {0:5.1f}%'.format(bm[1][0]))
print(' True Positive Rate = {0:5.1f}%'.format(bm[1][1]))
print('')
plt.plot(bm[1][0], bm[1][1], c, label='Combo ' + str(num+1))
plt.gca().set_xlim(0.0, 100.0)
plt.gca().set_ylim(0.0, 100.0)
plt.gca().set_xlabel('False positive rate (%)')
plt.gca().set_ylabel('True positive rate (%)')
plt.show()
| mit |
bengio/deeptorch | scripts/visualizing.py | 3 | 3376 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""One-line documentation for visualizing module.
A detailed description of visualizing.
"""
__author__ = '[email protected] (Pierre-Antoine Manzagol)'
from google3.pyglib import app
from google3.pyglib import flags
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
import numpy
FLAGS = flags.FLAGS
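# Note on file format (inferred from the parsing below): each weight /
# representation file is plain text with one line per neuron and
# whitespace-separated float values, one per input.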
def weights_filename_to_array(filename):
W = open(filename)
lines = W.readlines()
W.close()
# init weight
n_neurons = len(lines)
n_inputs = len(lines[0].split())
print str(n_neurons) + " neurons and " + str(n_inputs) + " inputs."
weights = numpy.empty([n_neurons,n_inputs])
# fill the weights
for i,line in enumerate(lines):
neuron_weights = line.split()
# convert strings to floats
for j,w in enumerate(neuron_weights):
weights[i][j] = float(w)
return [n_inputs, n_neurons, weights]
def plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal, location,
filename, do_transpose, label):
[n_inputs, n_neurons, weights] = weights_filename_to_array(filename)
if do_transpose:
weights = numpy.transpose(weights)
pylab.subplot(nsubplot_vertical, nsubplot_horizontal, location)
pylab.imshow(weights, aspect='auto', interpolation='nearest')
pylab.xlabel(label)
pylab.gray()
def plot_representation(n_subplot_vertical, n_suplot_horizontal, location,
filename, label):
  # let's use this inappropriately named function
[n_inputs, n_neurons, weights] = weights_filename_to_array(filename)
representations = weights
pylab.subplot(n_subplot_vertical, n_suplot_horizontal, location)
pylab.imshow(representations, aspect='auto', interpolation='nearest')
pylab.xlabel(label)
pylab.gray()
def visualize_representations(dir, nlayers):
pylab.clf()
n_subplot_vertical = nlayers+1
n_subplot_horizontal = 4
# input
location = 1 + nlayers*n_subplot_horizontal + 1
filename = dir+"representations/input.txt"
plot_representation(n_subplot_vertical, n_subplot_horizontal, location,
filename, "x")
#
for i in range(nlayers):
# reconstruction of input
location = 1 + (nlayers-i)*n_subplot_horizontal
filename = dir+"representations/recons_from_hidden_l" + str(i) + ".txt"
plot_representation(n_subplot_vertical, n_subplot_horizontal, location,
filename, "rebuilt from h")
# hidden layer
location = 1 + (nlayers-i-1)*n_subplot_horizontal + 1
filename = dir+"representations/hidden_l" + str(i) + ".txt"
plot_representation(n_subplot_vertical, n_subplot_horizontal, location,
filename, "hidden")
# reconstruction of layer through speech
location = 1 + (nlayers-i-1)*n_subplot_horizontal + 2
filename = dir+"representations/recons_from_speech_l" + str(i) + ".txt"
plot_representation(n_subplot_vertical, n_subplot_horizontal, location,
filename, "rebuilt from s")
# speech
location = 1 + (nlayers-i-1)*n_subplot_horizontal + 3
filename = dir+"representations/speech_l" + str(i) + ".txt"
plot_representation(n_subplot_vertical, n_subplot_horizontal, location,
filename, "speech")
pylab.savefig(dir + "representations.png")
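# Illustrative call (the directory name is hypothetical):
#   visualize_representations('/tmp/run1/', nlayers=3)
# The function reads '<dir>representations/*.txt' and writes
# '<dir>representations.png', so `dir` should end with a path separator.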
| apache-2.0 |
johnbachman/indra | indra/sources/ctd/api.py | 4 | 2766 | import pandas
from .processor import CTDProcessor, CTDChemicalDiseaseProcessor, \
CTDGeneDiseaseProcessor, CTDChemicalGeneProcessor
base_url = 'http://ctdbase.org/reports/'
urls = {
'chemical_gene': base_url + 'CTD_chem_gene_ixns.tsv.gz',
'chemical_disease': base_url + 'CTD_chemicals_diseases.tsv.gz',
'gene_disease': base_url + 'CTD_genes_diseases.tsv.gz',
}
processors = {
'chemical_gene': CTDChemicalGeneProcessor,
'chemical_disease': CTDChemicalDiseaseProcessor,
'gene_disease': CTDGeneDiseaseProcessor,
}
def process_from_web(subset, url=None):
"""Process a subset of CTD from the web into INDRA Statements.
Parameters
----------
subset : str
A CTD subset, one of chemical_gene, chemical_disease,
gene_disease.
url : Optional[str]
If not provided, the default CTD URL is used (beware, it usually
gives permission denied). If provided, the given URL is used to
access a tsv or tsv.gz file.
Returns
-------
CTDProcessor
A CTDProcessor which contains INDRA Statements extracted from the
given CTD subset as its statements attribute.
"""
if subset not in urls:
raise ValueError('%s is not a valid CTD subset.' % subset)
url = url if url else urls[subset]
return _process_url_or_file(url, subset)
def process_tsv(fname, subset):
"""Process a subset of CTD from a tsv or tsv.gz file into INDRA Statements.
Parameters
----------
fname : str
Path to a tsv or tsv.gz file of the given CTD subset.
subset : str
A CTD subset, one of chemical_gene, chemical_disease,
gene_disease.
Returns
-------
CTDProcessor
A CTDProcessor which contains INDRA Statements extracted from the
given CTD subset as its statements attribute.
"""
return _process_url_or_file(fname, subset)
def _process_url_or_file(path, subset):
df = pandas.read_csv(path, sep='\t', comment='#',
header=None, dtype=str, keep_default_na=False)
return process_dataframe(df, subset)
def process_dataframe(df, subset):
"""Process a subset of CTD from a DataFrame into INDRA Statements.
Parameters
----------
df : pandas.DataFrame
A DataFrame of the given CTD subset.
subset : str
A CTD subset, one of chemical_gene, chemical_disease,
gene_disease.
Returns
-------
CTDProcessor
A CTDProcessor which contains INDRA Statements extracted from the
given CTD subset as its statements attribute.
"""
if subset not in processors:
raise ValueError('%s is not a valid CTD subset.' % subset)
cp = processors[subset](df)
cp.extract_statements()
return cp
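# Illustrative usage (the local file name is hypothetical):
#   cp = process_tsv('CTD_chem_gene_ixns.tsv.gz', 'chemical_gene')
#   stmts = cp.statements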
| bsd-2-clause |
carefree0910/MachineLearning | Zhihu/NN/_extra/one/Networks.py | 1 | 5075 | import matplotlib.pyplot as plt
from Zhihu.NN._extra.Layers import *
from Zhihu.NN._extra.Optimizers import *
class NNDist:
NNTiming = Timing()
def __init__(self):
self._layers, self._weights, self._bias = [], [], []
self._w_optimizer = self._b_optimizer = None
self._current_dimension = 0
@NNTiming.timeit(level=4, prefix="[API] ")
def feed_timing(self, timing):
if isinstance(timing, Timing):
self.NNTiming = timing
for layer in self._layers:
layer.feed_timing(timing)
def __str__(self):
return "Neural Network"
__repr__ = __str__
# Utils
@NNTiming.timeit(level=4)
def _add_weight(self, shape):
self._weights.append(np.random.randn(*shape))
self._bias.append(np.zeros((1, shape[1])))
@NNTiming.timeit(level=4)
def _add_layer(self, layer, *args):
_parent = self._layers[-1]
_current, _next = args
self._layers.append(layer)
if isinstance(layer, CostLayer):
_parent.child = layer
self.parent = _parent
self._add_weight((1, 1))
self._current_dimension = _next
else:
self._add_weight((_current, _next))
self._current_dimension = _next
@NNTiming.timeit(level=4)
def _add_cost_layer(self):
_last_layer = self._layers[-1]
if _last_layer.name == "Sigmoid":
_cost_func = "Cross Entropy"
elif _last_layer.name == "Softmax":
_cost_func = "Log Likelihood"
else:
_cost_func = "MSE"
_cost_layer = CostLayer(_last_layer, (self._current_dimension,), _cost_func)
self.add(_cost_layer)
@NNTiming.timeit(level=1)
def _get_prediction(self, x):
return self._get_activations(x).pop()
@NNTiming.timeit(level=1)
def _get_activations(self, x):
_activations = [self._layers[0].activate(x, self._weights[0], self._bias[0])]
for i, layer in enumerate(self._layers[1:]):
_activations.append(layer.activate(
_activations[-1], self._weights[i + 1], self._bias[i + 1]))
return _activations
# Optimizing Process
def _init_optimizers(self, lr):
self._w_optimizer, self._b_optimizer = Adam(lr), Adam(lr)
self._w_optimizer.feed_variables(self._weights)
self._b_optimizer.feed_variables(self._bias)
@NNTiming.timeit(level=1)
def _opt(self, i, _activation, _delta):
self._weights[i] += self._w_optimizer.run(
i, _activation.reshape(_activation.shape[0], -1).T.dot(_delta)
)
self._bias[i] += self._b_optimizer.run(
i, np.sum(_delta, axis=0, keepdims=True)
)
# API
@NNTiming.timeit(level=4, prefix="[API] ")
def add(self, layer):
if not self._layers:
self._layers, self._current_dimension = [layer], layer.shape[1]
self._add_weight(layer.shape)
else:
_next = layer.shape[0]
layer.shape = (self._current_dimension, _next)
self._add_layer(layer, self._current_dimension, _next)
@NNTiming.timeit(level=1, prefix="[API] ")
def fit(self, x=None, y=None, lr=0.01, epoch=10):
# Initialize
self._add_cost_layer()
self._init_optimizers(lr)
layer_width = len(self._layers)
# Train
for counter in range(epoch):
self._w_optimizer.update(); self._b_optimizer.update()
_activations = self._get_activations(x)
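            # _deltas is built from the cost layer backwards, so _deltas[k]
            # corresponds to layer index (layer_width - 1 - k); the _opt loop
            # below indexes it accordingly.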
_deltas = [self._layers[-1].bp_first(y, _activations[-1])]
for i in range(-1, -len(_activations), -1):
_deltas.append(self._layers[i - 1].bp(
_activations[i - 1], self._weights[i], _deltas[-1]
))
for i in range(layer_width - 2, 0, -1):
self._opt(i, _activations[i - 1], _deltas[layer_width-i-1])
self._opt(0, x, _deltas[-1])
@NNTiming.timeit(level=4, prefix="[API] ")
def predict(self, x):
return self._get_prediction(x)
@NNTiming.timeit(level=4, prefix="[API] ")
def evaluate(self, x, y):
y_pred = self.predict(x)
y_arg = np.argmax(y, axis=1)
y_pred_arg = np.argmax(y_pred, axis=1)
print("Acc: {:8.6}".format(np.sum(y_arg == y_pred_arg) / len(y_arg)))
def visualize_2d(self, x, y, plot_scale=2, plot_precision=0.01):
plot_num = int(1 / plot_precision)
xf = np.linspace(np.min(x) * plot_scale, np.max(x) * plot_scale, plot_num)
yf = np.linspace(np.min(x) * plot_scale, np.max(x) * plot_scale, plot_num)
input_x, input_y = np.meshgrid(xf, yf)
input_xs = np.c_[input_x.ravel(), input_y.ravel()]
output_ys_2d = np.argmax(self.predict(input_xs), axis=1).reshape(len(xf), len(yf))
plt.contourf(input_x, input_y, output_ys_2d, cmap=plt.cm.Spectral)
plt.scatter(x[:, 0], x[:, 1], c=np.argmax(y, axis=1), s=40, cmap=plt.cm.Spectral)
plt.axis("off")
plt.show()
| mit |
enigmampc/catalyst | tests/pipeline/test_term.py | 1 | 25102 | """
Tests for Term.
"""
from collections import Counter
from itertools import product
from unittest import TestCase
from toolz import assoc
import pandas as pd
from catalyst.assets import Asset
from catalyst.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonWindowSafeInput,
NotDType,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from catalyst.pipeline import (
Classifier,
CustomClassifier,
CustomFactor,
Factor,
Filter,
ExecutionPlan,
)
from catalyst.pipeline.data import Column, DataSet
from catalyst.pipeline.data.testing import TestingDataSet
from catalyst.pipeline.expression import NUMEXPR_MATH_FUNCS
from catalyst.pipeline.factors import RecarrayField
from catalyst.pipeline.sentinels import NotSpecified
from catalyst.pipeline.term import AssetExists, Slice
from catalyst.testing import parameter_space
from catalyst.testing.fixtures import WithTradingSessions, CatalystTestCase
from catalyst.testing.predicates import (
assert_equal,
assert_raises,
assert_raises_regex,
assert_regex,
)
from catalyst.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
complex128_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
NoDefaultMissingValue,
)
class SomeDataSet(DataSet):
foo = Column(float64_dtype)
bar = Column(float64_dtype)
buzz = Column(float64_dtype)
class SubDataSet(SomeDataSet):
pass
class SubDataSetNewCol(SomeDataSet):
qux = Column(float64_dtype)
class SomeFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
SomeFactorAlias = SomeFactor
class SomeOtherFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.bar, SomeDataSet.buzz]
class DateFactor(Factor):
dtype = datetime64ns_dtype
window_length = 5
inputs = [SomeDataSet.bar, SomeDataSet.buzz]
class NoLookbackFactor(Factor):
dtype = float64_dtype
window_length = 0
class GenericCustomFactor(CustomFactor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo]
class MultipleOutputs(CustomFactor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
outputs = ['alpha', 'beta']
def some_method(self):
return
class GenericFilter(Filter):
dtype = bool_dtype
window_length = 0
inputs = []
class GenericClassifier(Classifier):
dtype = categorical_dtype
window_length = 0
inputs = []
def gen_equivalent_factors():
"""
Return an iterator of SomeFactor instances that should all be the same
object.
"""
yield SomeFactor()
yield SomeFactor(inputs=NotSpecified)
yield SomeFactor(SomeFactor.inputs)
yield SomeFactor(inputs=SomeFactor.inputs)
yield SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
yield SomeFactor(window_length=SomeFactor.window_length)
yield SomeFactor(window_length=NotSpecified)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=NotSpecified,
)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=SomeFactor.window_length,
)
yield SomeFactorAlias()
def to_dict(l):
"""
Convert a list to a dict with keys drawn from '0', '1', '2', ...
Examples
--------
>>> to_dict([2, 3, 4]) # doctest: +SKIP
{'0': 2, '1': 3, '2': 4}
"""
return dict(zip(map(str, range(len(l))), l))
class DependencyResolutionTestCase(WithTradingSessions, CatalystTestCase):
TRADING_CALENDAR_STRS = ('NYSE',)
START_DATE = pd.Timestamp('2014-01-02', tz='UTC')
END_DATE = pd.Timestamp('2014-12-31', tz='UTC')
execution_plan_start = pd.Timestamp('2014-06-01', tz='UTC')
execution_plan_end = pd.Timestamp('2014-06-30', tz='UTC')
def check_dependency_order(self, ordered_terms):
seen = set()
for term in ordered_terms:
for dep in term.dependencies:
self.assertIn(dep, seen)
seen.add(term)
def make_execution_plan(self, terms):
return ExecutionPlan(
terms,
self.nyse_sessions,
self.execution_plan_start,
self.execution_plan_end,
)
def test_single_factor(self):
"""
Test dependency resolution for a single factor.
"""
def check_output(graph):
resolution_order = list(graph.ordered())
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertIn(SomeDataSet.foo, resolution_order)
self.assertIn(SomeDataSet.bar, resolution_order)
self.assertIn(SomeFactor(), resolution_order)
self.assertEqual(
graph.graph.node[SomeDataSet.foo]['extra_rows'],
4,
)
self.assertEqual(
graph.graph.node[SomeDataSet.bar]['extra_rows'],
4,
)
for foobar in gen_equivalent_factors():
check_output(self.make_execution_plan(to_dict([foobar])))
def test_single_factor_instance_args(self):
"""
Test dependency resolution for a single factor with arguments passed to
the constructor.
"""
bar, buzz = SomeDataSet.bar, SomeDataSet.buzz
factor = SomeFactor([bar, buzz], window_length=5)
graph = self.make_execution_plan(to_dict([factor]))
resolution_order = list(graph.ordered())
# SomeFactor, its inputs, and AssetExists()
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertEqual(graph.extra_rows[AssetExists()], 4)
self.assertIn(bar, resolution_order)
self.assertIn(buzz, resolution_order)
self.assertIn(SomeFactor([bar, buzz], window_length=5),
resolution_order)
self.assertEqual(graph.extra_rows[bar], 4)
self.assertEqual(graph.extra_rows[buzz], 4)
def test_reuse_loadable_terms(self):
"""
Test that raw inputs only show up in the dependency graph once.
"""
f1 = SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
f2 = SomeOtherFactor([SomeDataSet.bar, SomeDataSet.buzz])
graph = self.make_execution_plan(to_dict([f1, f2]))
resolution_order = list(graph.ordered())
# bar should only appear once.
self.assertEqual(len(resolution_order), 6)
self.assertEqual(len(set(resolution_order)), 6)
self.check_dependency_order(resolution_order)
def test_disallow_recursive_lookback(self):
with self.assertRaises(NonWindowSafeInput):
SomeFactor(inputs=[SomeFactor(), SomeDataSet.foo])
class ObjectIdentityTestCase(TestCase):
def assertSameObject(self, *objs):
first = objs[0]
for obj in objs:
self.assertIs(first, obj)
def assertDifferentObjects(self, *objs):
id_counts = Counter(map(id, objs))
((most_common_id, count),) = id_counts.most_common(1)
if count > 1:
dupe = [o for o in objs if id(o) == most_common_id][0]
self.fail("%s appeared %d times in %s" % (dupe, count, objs))
def test_instance_caching(self):
self.assertSameObject(*gen_equivalent_factors())
self.assertIs(
SomeFactor(window_length=SomeFactor.window_length + 1),
SomeFactor(window_length=SomeFactor.window_length + 1),
)
self.assertIs(
SomeFactor(dtype=float64_dtype),
SomeFactor(dtype=float64_dtype),
)
self.assertIs(
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
mask = SomeFactor() + SomeOtherFactor()
self.assertIs(SomeFactor(mask=mask), SomeFactor(mask=mask))
def test_instance_caching_multiple_outputs(self):
self.assertIs(MultipleOutputs(), MultipleOutputs())
self.assertIs(
MultipleOutputs(),
MultipleOutputs(outputs=MultipleOutputs.outputs),
)
self.assertIs(
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
)
# Ensure that both methods of accessing our outputs return the same
# things.
multiple_outputs = MultipleOutputs()
alpha, beta = MultipleOutputs()
self.assertIs(alpha, multiple_outputs.alpha)
self.assertIs(beta, multiple_outputs.beta)
def test_instance_caching_of_slices(self):
my_asset = Asset(1, exchange="TEST")
f = GenericCustomFactor()
f_slice = f[my_asset]
self.assertIs(f_slice, Slice(GenericCustomFactor(), my_asset))
f = GenericFilter()
f_slice = f[my_asset]
self.assertIs(f_slice, Slice(GenericFilter(), my_asset))
c = GenericClassifier()
c_slice = c[my_asset]
self.assertIs(c_slice, Slice(GenericClassifier(), my_asset))
def test_instance_non_caching(self):
f = SomeFactor()
# Different window_length.
self.assertIsNot(
f,
SomeFactor(window_length=SomeFactor.window_length + 1),
)
# Different dtype
self.assertIsNot(
f,
SomeFactor(dtype=datetime64ns_dtype)
)
# Reordering inputs changes semantics.
self.assertIsNot(
f,
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
def test_instance_non_caching_redefine_class(self):
orig_foobar_instance = SomeFactorAlias()
class SomeFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
self.assertIsNot(orig_foobar_instance, SomeFactor())
def test_instance_non_caching_multiple_outputs(self):
multiple_outputs = MultipleOutputs()
# Different outputs.
self.assertIsNot(
MultipleOutputs(), MultipleOutputs(outputs=['beta', 'gamma']),
)
# Reordering outputs.
self.assertIsNot(
multiple_outputs,
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
)
# Different factors sharing an output name should produce different
# RecarrayField factors.
orig_beta = multiple_outputs.beta
beta, gamma = MultipleOutputs(outputs=['beta', 'gamma'])
self.assertIsNot(beta, orig_beta)
def test_instance_caching_binops(self):
f = SomeFactor()
g = SomeOtherFactor()
for lhs, rhs in product([f, g], [f, g]):
self.assertIs((lhs + rhs), (lhs + rhs))
self.assertIs((lhs - rhs), (lhs - rhs))
self.assertIs((lhs * rhs), (lhs * rhs))
self.assertIs((lhs / rhs), (lhs / rhs))
self.assertIs((lhs ** rhs), (lhs ** rhs))
self.assertIs((1 + rhs), (1 + rhs))
self.assertIs((rhs + 1), (rhs + 1))
self.assertIs((1 - rhs), (1 - rhs))
self.assertIs((rhs - 1), (rhs - 1))
self.assertIs((2 * rhs), (2 * rhs))
self.assertIs((rhs * 2), (rhs * 2))
self.assertIs((2 / rhs), (2 / rhs))
self.assertIs((rhs / 2), (rhs / 2))
self.assertIs((2 ** rhs), (2 ** rhs))
self.assertIs((rhs ** 2), (rhs ** 2))
self.assertIs((f + g) + (f + g), (f + g) + (f + g))
def test_instance_caching_unary_ops(self):
f = SomeFactor()
self.assertIs(-f, -f)
self.assertIs(--f, --f)
self.assertIs(---f, ---f)
def test_instance_caching_math_funcs(self):
f = SomeFactor()
for funcname in NUMEXPR_MATH_FUNCS:
method = getattr(f, funcname)
self.assertIs(method(), method())
def test_instance_caching_grouped_transforms(self):
f = SomeFactor()
c = GenericClassifier()
m = GenericFilter()
for meth in f.demean, f.zscore, f.rank:
self.assertIs(meth(), meth())
self.assertIs(meth(groupby=c), meth(groupby=c))
self.assertIs(meth(mask=m), meth(mask=m))
self.assertIs(meth(groupby=c, mask=m), meth(groupby=c, mask=m))
class SomeFactorParameterized(SomeFactor):
params = ('a', 'b')
def test_parameterized_term(self):
f = self.SomeFactorParameterized(a=1, b=2)
self.assertEqual(f.params, {'a': 1, 'b': 2})
g = self.SomeFactorParameterized(a=1, b=3)
h = self.SomeFactorParameterized(a=2, b=2)
self.assertDifferentObjects(f, g, h)
f2 = self.SomeFactorParameterized(a=1, b=2)
f3 = self.SomeFactorParameterized(b=2, a=1)
self.assertSameObject(f, f2, f3)
self.assertEqual(f.params['a'], 1)
self.assertEqual(f.params['b'], 2)
self.assertEqual(f.window_length, SomeFactor.window_length)
self.assertEqual(f.inputs, tuple(SomeFactor.inputs))
def test_parameterized_term_non_hashable_arg(self):
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=[], b=1)
assert_equal(
str(e.exception),
"SomeFactorParameterized expected a hashable value for parameter"
" 'a', but got [] instead.",
)
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=1, b=[])
assert_equal(
str(e.exception),
"SomeFactorParameterized expected a hashable value for parameter"
" 'b', but got [] instead.",
)
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=[], b=[])
assert_regex(
str(e.exception),
r"SomeFactorParameterized expected a hashable value for parameter"
r" '(a|b)', but got \[\] instead\.",
)
def test_parameterized_term_default_value(self):
defaults = {'a': 'default for a', 'b': 'default for b'}
class F(Factor):
params = defaults
inputs = (SomeDataSet.foo,)
dtype = 'f8'
window_length = 5
assert_equal(F().params, defaults)
assert_equal(F(a='new a').params, assoc(defaults, 'a', 'new a'))
assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
assert_equal(
F(a='new a', b='new b').params,
{'a': 'new a', 'b': 'new b'},
)
def test_parameterized_term_default_value_with_not_specified(self):
defaults = {'a': 'default for a', 'b': NotSpecified}
class F(Factor):
params = defaults
inputs = (SomeDataSet.foo,)
dtype = 'f8'
window_length = 5
pattern = r"F expected a keyword parameter 'b'\."
with assert_raises_regex(TypeError, pattern):
F()
with assert_raises_regex(TypeError, pattern):
F(a='new a')
assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
assert_equal(
F(a='new a', b='new b').params,
{'a': 'new a', 'b': 'new b'},
)
def test_bad_input(self):
class SomeFactor(Factor):
dtype = float64_dtype
class SomeFactorDefaultInputs(SomeFactor):
inputs = (SomeDataSet.foo, SomeDataSet.bar)
class SomeFactorDefaultLength(SomeFactor):
window_length = 10
class SomeFactorNoDType(SomeFactor):
window_length = 10
inputs = (SomeDataSet.foo,)
dtype = NotSpecified
with self.assertRaises(TermInputsNotSpecified):
SomeFactor(window_length=1)
with self.assertRaises(TermInputsNotSpecified):
SomeFactorDefaultLength()
with self.assertRaises(WindowLengthNotSpecified):
SomeFactor(inputs=(SomeDataSet.foo,))
with self.assertRaises(WindowLengthNotSpecified):
SomeFactorDefaultInputs()
with self.assertRaises(DTypeNotSpecified):
SomeFactorNoDType()
with self.assertRaises(NotDType):
SomeFactor(dtype=1)
with self.assertRaises(NoDefaultMissingValue):
SomeFactor(dtype=int64_dtype)
with self.assertRaises(UnsupportedDType):
SomeFactor(dtype=complex128_dtype)
with self.assertRaises(TermOutputsEmpty):
MultipleOutputs(outputs=[])
def test_bad_output_access(self):
with self.assertRaises(AttributeError) as e:
SomeFactor().not_an_attr
errmsg = str(e.exception)
self.assertEqual(
errmsg, "'SomeFactor' object has no attribute 'not_an_attr'",
)
mo = MultipleOutputs()
with self.assertRaises(AttributeError) as e:
mo.not_an_attr
errmsg = str(e.exception)
expected = (
"Instance of MultipleOutputs has no output named 'not_an_attr'."
" Possible choices are: ('alpha', 'beta')."
)
self.assertEqual(errmsg, expected)
with self.assertRaises(ValueError) as e:
alpha, beta = GenericCustomFactor()
errmsg = str(e.exception)
self.assertEqual(
errmsg, "GenericCustomFactor does not have multiple outputs.",
)
# Public method, user-defined method.
# Accessing these attributes should return the output, not the method.
conflicting_output_names = ['zscore', 'some_method']
mo = MultipleOutputs(outputs=conflicting_output_names)
for name in conflicting_output_names:
self.assertIsInstance(getattr(mo, name), RecarrayField)
# Non-callable attribute, private method, special method.
disallowed_output_names = ['inputs', '_init', '__add__']
for name in disallowed_output_names:
with self.assertRaises(InvalidOutputName):
GenericCustomFactor(outputs=[name])
def test_require_super_call_in_validate(self):
class MyFactor(Factor):
inputs = ()
dtype = float64_dtype
window_length = 0
def _validate(self):
"Woops, I didn't call super()!"
with self.assertRaises(AssertionError) as e:
MyFactor()
errmsg = str(e.exception)
self.assertEqual(
errmsg,
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
def test_latest_on_different_dtypes(self):
factor_dtypes = (float64_dtype, datetime64ns_dtype)
for column in TestingDataSet.columns:
if column.dtype == bool_dtype:
self.assertIsInstance(column.latest, Filter)
elif (column.dtype == int64_dtype
or column.dtype.kind in ('O', 'S', 'U')):
self.assertIsInstance(column.latest, Classifier)
elif column.dtype in factor_dtypes:
self.assertIsInstance(column.latest, Factor)
else:
self.fail(
"Unknown dtype %s for column %s" % (column.dtype, column)
)
# These should be the same value, plus this has the convenient
# property of correctly handling `NaN`.
self.assertIs(column.missing_value, column.latest.missing_value)
def test_failure_timing_on_bad_dtypes(self):
# Just constructing a bad column shouldn't fail.
Column(dtype=int64_dtype)
with self.assertRaises(NoDefaultMissingValue) as e:
class BadDataSet(DataSet):
bad_column = Column(dtype=int64_dtype)
float_column = Column(dtype=float64_dtype)
int_column = Column(dtype=int64_dtype, missing_value=3)
self.assertTrue(
str(e.exception.args[0]).startswith(
"Failed to create Column with name 'bad_column'"
)
)
Column(dtype=complex128_dtype)
with self.assertRaises(UnsupportedDType):
class BadDataSetComplex(DataSet):
bad_column = Column(dtype=complex128_dtype)
float_column = Column(dtype=float64_dtype)
int_column = Column(dtype=int64_dtype, missing_value=3)
class SubDataSetTestCase(TestCase):
def test_subdataset(self):
some_dataset_map = {
column.name: column for column in SomeDataSet.columns
}
sub_dataset_map = {
column.name: column for column in SubDataSet.columns
}
self.assertEqual(
{column.name for column in SomeDataSet.columns},
{column.name for column in SubDataSet.columns},
)
for k, some_dataset_column in some_dataset_map.items():
sub_dataset_column = sub_dataset_map[k]
self.assertIsNot(
some_dataset_column,
sub_dataset_column,
'subclass column %r should not have the same identity as'
' the parent' % k,
)
self.assertEqual(
some_dataset_column.dtype,
sub_dataset_column.dtype,
'subclass column %r should have the same dtype as the parent' %
k,
)
def test_add_column(self):
some_dataset_map = {
column.name: column for column in SomeDataSet.columns
}
sub_dataset_new_col_map = {
column.name: column for column in SubDataSetNewCol.columns
}
sub_col_names = {column.name for column in SubDataSetNewCol.columns}
# check our extra col
self.assertIn('qux', sub_col_names)
self.assertEqual(
sub_dataset_new_col_map['qux'].dtype,
float64_dtype,
)
self.assertEqual(
{column.name for column in SomeDataSet.columns},
sub_col_names - {'qux'},
)
for k, some_dataset_column in some_dataset_map.items():
sub_dataset_column = sub_dataset_new_col_map[k]
self.assertIsNot(
some_dataset_column,
sub_dataset_column,
'subclass column %r should not have the same identity as'
' the parent' % k,
)
self.assertEqual(
some_dataset_column.dtype,
sub_dataset_column.dtype,
'subclass column %r should have the same dtype as the parent' %
k,
)
@parameter_space(
dtype_=[categorical_dtype, int64_dtype],
outputs_=[('a',), ('a', 'b')],
)
def test_reject_multi_output_classifiers(self, dtype_, outputs_):
"""
Multi-output CustomClassifiers don't work because they use special
output allocation for string arrays.
"""
class SomeClassifier(CustomClassifier):
dtype = dtype_
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
outputs = outputs_
missing_value = dtype_.type('123')
expected_error = (
"SomeClassifier does not support custom outputs, "
"but received custom outputs={outputs}.".format(outputs=outputs_)
)
with self.assertRaises(ValueError) as e:
SomeClassifier()
self.assertEqual(str(e.exception), expected_error)
with self.assertRaises(ValueError) as e:
SomeClassifier()
self.assertEqual(str(e.exception), expected_error)
def test_unreasonable_missing_values(self):
for base_type, dtype_, bad_mv in ((Factor, float64_dtype, 'ayy'),
(Filter, bool_dtype, 'lmao'),
(Classifier, int64_dtype, 'lolwut'),
(Classifier, categorical_dtype, 7)):
class SomeTerm(base_type):
inputs = ()
window_length = 0
missing_value = bad_mv
dtype = dtype_
with self.assertRaises(TypeError) as e:
SomeTerm()
prefix = (
"^Missing value {mv!r} is not a valid choice "
"for term SomeTerm with dtype {dtype}.\n\n"
"Coercion attempt failed with:"
).format(mv=bad_mv, dtype=dtype_)
self.assertRegexpMatches(str(e.exception), prefix)
| apache-2.0 |
arjoly/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__all__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
        A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in next versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
bob-anderson-ok/py-ote | src/pyoteapp/pyote.py | 1 | 199800 | """
Created on Sat May 20 15:32:13 2017
@author: Bob Anderson
"""
import subprocess
MIN_SIGMA = 0.1
import datetime
import os
import sys
import platform
from openpyxl import load_workbook
from math import trunc, floor
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Qt5Agg')
from pyoteapp.showVideoFrames import readAviFile
from pyoteapp.showVideoFrames import readSerFile
from pyoteapp.showVideoFrames import readFitsFile
from pyoteapp.showVideoFrames import readAavFile
from pyoteapp.false_positive import compute_drops
import numpy as np
import pyqtgraph as pg
import pyqtgraph.exporters as pex
import scipy.signal
from PyQt5 import QtCore, QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import QSettings, QPoint, QSize
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QFileDialog, QMessageBox, QDialog
from pyqtgraph import PlotWidget
from pyoteapp import version
from pyoteapp import fixedPrecision as fp
from pyoteapp import gui
from pyoteapp import timestampDialog
from pyoteapp import helpDialog
from pyoteapp.checkForNewerVersion import getMostRecentVersionOfPyOTEViaJson
from pyoteapp.checkForNewerVersion import upgradePyote
from pyoteapp.csvreader import readLightCurve
from pyoteapp.errorBarUtils import ciBars
from pyoteapp.errorBarUtils import createDurDistribution
from pyoteapp.errorBarUtils import edgeDistributionGenerator
from pyoteapp.noiseUtils import getCorCoefs
from pyoteapp.solverUtils import candidateCounter, solver, subFrameAdjusted
from pyoteapp.timestampUtils import convertTimeStringToTime
from pyoteapp.timestampUtils import convertTimeToTimeString
from pyoteapp.timestampUtils import getTimeStepAndOutliers
from pyoteapp.timestampUtils import manualTimeStampEntry
from pyoteapp.blockIntegrateUtils import mean_std_versus_offset
from pyoteapp.iterative_logl_functions import locate_event_from_d_and_r_ranges
from pyoteapp.iterative_logl_functions import find_best_event_from_min_max_size
from pyoteapp.iterative_logl_functions import find_best_r_only_from_min_max_size
from pyoteapp.iterative_logl_functions import find_best_d_only_from_min_max_size
from pyoteapp.subframe_timing_utilities import generate_underlying_lightcurve_plots, fresnel_length_km
from pyoteapp.subframe_timing_utilities import time_correction, intensity_at_time
cursorAlert = pyqtSignal()
# The gui module was created by typing
# !pyuic5 simple_plot2.ui -o gui.py
# in the IPython console while in pyoteapp directory
# The timestampDialog module was created by typing
# !pyuic5 timestamp_dialog_alt.ui -o timestampDialog.py
# in the IPython console while in pyoteapp directory
# The help-dialog module was created by typing
# !pyuic5 helpDialog.ui -o helpDialog.py
# in the IPython console while in pyoteapp directory
# Status of points and associated dot colors ---
SELECTED = 3 # big red
BASELINE = 2 # green
INCLUDED = 1 # blue
EXCLUDED = 0 # no dot
LINESIZE = 2
acfCoefThreshold = 0.05 # To match what is being done in R-OTE 4.5.4+
# There is a bug in pyqtgraph ImageExpoter, probably caused by new versions of PyQt5 returning
# float values for image rectangles. Those floats were being given to numpy to create a matrix,
# and that was raising an exception. Below is my 'cure', effected by overriding the internal
# methods of ImageExporter that manipulate width and height
class FixedImageExporter(pex.ImageExporter):
def __init__(self, item):
pex.ImageExporter.__init__(self, item)
def makeWidthHeightInts(self):
self.params['height'] = int(self.params['height'] + 1) # The +1 is needed
self.params['width'] = int(self.params['width'] + 1)
def widthChanged(self):
sr = self.getSourceRect()
ar = float(sr.height()) / sr.width()
self.params.param('height').setValue(int(self.params['width'] * ar),
blockSignal=self.heightChanged)
def heightChanged(self):
sr = self.getSourceRect()
ar = float(sr.width()) / sr.height()
self.params.param('width').setValue(int(self.params['height'] * ar),
blockSignal=self.widthChanged)
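# Illustrative use of the exporter (names and filename are hypothetical):
#   exporter = FixedImageExporter(somePlotWidget.getPlotItem())
#   exporter.makeWidthHeightInts()
#   exporter.export('lightcurve.png')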
class Signal:
def __init__(self):
self.__subscribers = []
def emit(self, *args, **kwargs):
for subs in self.__subscribers:
subs(*args, **kwargs)
def connect(self, func):
self.__subscribers.append(func)
mouseSignal = Signal()
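# Signal is a minimal publish/subscribe helper: connect(func) registers a
# callback and emit(*args, **kwargs) invokes every registered callback.
# CustomViewBox below emits mouseSignal after right-click auto-range and after
# drag events; handlers elsewhere in this module can subscribe via connect().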
class CustomViewBox(pg.ViewBox):
def __init__(self, *args, **kwds):
pg.ViewBox.__init__(self, *args, **kwds)
self.setMouseMode(self.RectMode)
# re-implement right-click to zoom out
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton:
self.autoRange()
mouseSignal.emit()
def mouseDragEvent(self, ev, axis=None):
if ev.button() == QtCore.Qt.RightButton:
ev.ignore()
else:
pg.ViewBox.mouseDragEvent(self, ev, axis)
mouseSignal.emit()
class TSdialog(QDialog, timestampDialog.Ui_manualTimestampDialog):
def __init__(self):
super(TSdialog, self).__init__()
self.setupUi(self)
class HelpDialog(QDialog, helpDialog.Ui_Dialog):
def __init__(self):
super(HelpDialog, self).__init__()
self.setupUi(self)
class SimplePlot(QtGui.QMainWindow, gui.Ui_MainWindow):
def __init__(self, csv_file):
super(SimplePlot, self).__init__()
# This is an externally supplied csv file path (probably from PyMovie)
self.externalCsvFilePath = csv_file
self.homeDir = os.path.split(__file__)[0]
# Change pyqtgraph plots to be black on white
pg.setConfigOption('background', (255, 255, 255)) # Do before any widgets drawn
pg.setConfigOption('foreground', 'k') # Do before any widgets drawn
pg.setConfigOptions(imageAxisOrder='row-major')
self.setupUi(self)
self.setWindowTitle('PYOTE Version: ' + version.version())
# This object is used to display tooltip help in a separate
# modeless dialog box.
self.helperThing = HelpDialog()
self.helpButton.clicked.connect(self.helpButtonClicked)
self.helpButton.installEventFilter(self)
# Checkbox: Use manual timestamp entry
self.manualTimestampCheckBox.clicked.connect(self.toggleManualEntryButton)
self.manualTimestampCheckBox.installEventFilter(self)
# Button: Manual timestamp entry
self.manualEntryPushButton.clicked.connect(self.doManualTimestampEntry)
self.manualEntryPushButton.installEventFilter(self)
# Button: Info
self.infoButton.clicked.connect(self.openHelpFile)
self.infoButton.installEventFilter(self)
# Button: Read light curve
self.readData.clicked.connect(self.readDataFromFile)
self.readData.installEventFilter(self)
# CheckBox: Show secondary star
self.showSecondaryCheckBox.clicked.connect(self.toggleDisplayOfSecondaryStar)
self.showSecondaryCheckBox.installEventFilter(self)
self.normLabel.installEventFilter(self)
# Checkbox: Show timestamp errors
self.showTimestampErrors.clicked.connect(self.toggleDisplayOfTimestampErrors)
self.showTimestampErrors.installEventFilter(self)
# Checkbox: Show underlying lightcurve
self.showUnderlyingLightcurveCheckBox.installEventFilter(self)
self.showUnderlyingLightcurveCheckBox.clicked.connect(self.reDrawMainPlot)
# Checkbox: Show error bars
self.showErrBarsCheckBox.installEventFilter(self)
self.showErrBarsCheckBox.clicked.connect(self.reDrawMainPlot)
# Checkbox: Show D and R edges
self.showEdgesCheckBox.installEventFilter(self)
self.showEdgesCheckBox.clicked.connect(self.reDrawMainPlot)
# Checkbox: Do OCR check
self.showOCRcheckFramesCheckBox.installEventFilter(self)
# QSpinBox
self.secondarySelector.valueChanged.connect(self.changeSecondary)
# QSpinBox
self.curveToAnalyzeSpinBox.valueChanged.connect(self.changePrimary)
self.curveToAnalyzeSpinBox.installEventFilter(self)
self.lightCurveNumberLabel.installEventFilter(self)
# QSpinBox: secondarySelector
self.secondarySelector.installEventFilter(self)
# line size
self.lineWidthLabel.installEventFilter(self)
self.lineWidthSpinner.valueChanged.connect(self.reDrawMainPlot)
# plotHelpButton
self.plotHelpButton.clicked.connect(self.plotHelpButtonClicked)
self.plotHelpButton.installEventFilter(self)
# Button: Trim/Select data points
self.setDataLimits.clicked.connect(self.doTrim)
self.setDataLimits.installEventFilter(self)
# Button: Smooth secondary
self.smoothSecondaryButton.clicked.connect(self.smoothRefStar)
self.smoothSecondaryButton.installEventFilter(self)
# QLineEdit: window size for secondary smoothing
# self.numSmoothPointsEdit.editingFinished.connect(self.smoothRefStar)
self.numSmoothPointsEdit.installEventFilter(self)
# Button: Normalize around selected point
self.normalizeButton.clicked.connect(self.normalize)
self.normalizeButton.installEventFilter(self)
# Button: Do block integration
self.doBlockIntegration.clicked.connect(self.doIntegration)
self.doBlockIntegration.installEventFilter(self)
# Button: Accept integration
self.acceptBlockIntegration.clicked.connect(self.applyIntegration)
self.acceptBlockIntegration.installEventFilter(self)
# Button: Mark D zone
self.markDzone.clicked.connect(self.showDzone)
self.markDzone.installEventFilter(self)
# Button: Mark R zone
self.markRzone.clicked.connect(self.showRzone)
self.markRzone.installEventFilter(self)
# Button: Calc flash edge
self.calcFlashEdge.clicked.connect(self.calculateFlashREdge)
self.calcFlashEdge.installEventFilter(self)
# Edit box: min event
self.minEventEdit.installEventFilter(self)
# Edit box: max event
self.maxEventEdit.installEventFilter(self)
# Button: Locate event
self.locateEvent.clicked.connect(self.findEvent)
self.penumbralFitCheckBox.installEventFilter(self)
# Button: Cancel operation
self.cancelButton.clicked.connect(self.requestCancel)
# Button: Calculate error bars (... write report)
self.calcErrBars.clicked.connect(self.computeErrorBars)
self.calcErrBars.installEventFilter(self)
# Button: Copy results to Asteroid Occultation Report Form (... fill Excel report)
self.fillExcelReportButton.installEventFilter(self)
self.fillExcelReportButton.clicked.connect(self.fillExcelReport)
# Button: View frame
self.viewFrameButton.clicked.connect(self.viewFrame)
self.viewFrameButton.installEventFilter(self)
self.frameNumSpinBox.installEventFilter(self)
self.fieldViewCheckBox.installEventFilter(self)
self.flipYaxisCheckBox.installEventFilter(self)
self.flipXaxisCheckBox.installEventFilter(self)
# Underlying lightcurve controls
self.underlyingLightcurveLabel.installEventFilter(self)
self.enableDiffractionCalculationBox.installEventFilter(self)
self.demoUnderlyingLighturvesButton.installEventFilter(self)
self.demoUnderlyingLighturvesButton.clicked.connect(self.demoClickedUnderlyingLightcurves)
self.exposureTimeLabel.installEventFilter(self)
self.asteroidDistanceLabel.installEventFilter(self)
self.shadowSpeedLabel.installEventFilter(self)
self.asteroidSizeLabel.installEventFilter(self)
self.pathOffsetLabel.installEventFilter(self)
self.starDiameterLabel.installEventFilter(self)
self.dLimbAngleLabel.installEventFilter(self)
self.rLimbAngleLabel.installEventFilter(self)
# Button: Write error bar plot to file
self.writeBarPlots.clicked.connect(self.exportBarPlots)
self.writeBarPlots.installEventFilter(self)
# Button: Write graphic to file
self.writePlot.clicked.connect(self.exportGraphic)
self.writePlot.installEventFilter(self)
# Button: Write csv file
self.writeCSVButton.clicked.connect(self.writeCSVfile)
self.writeCSVButton.installEventFilter(self)
# Button: Start over
self.startOver.clicked.connect(self.restart)
self.startOver.installEventFilter(self)
# Set up handlers for clicks on table view of data
self.table.cellClicked.connect(self.cellClick)
self.table.verticalHeader().sectionClicked.connect(self.rowClick)
self.table.installEventFilter(self)
self.helpLabelForDataGrid.installEventFilter(self)
# Re-instantiate mainPlot. Note: examine gui.py to get this right after a
# re-layout !!!! self.widget changes sometimes, as does horizontalLayout_?
oldMainPlot = self.mainPlot
self.mainPlot = PlotWidget(self.splitter_2,
viewBox=CustomViewBox(border=(255, 255, 255)),
enableMenu=False, stretch=1)
# self.mainPlot.setMinimumSize(QtCore.QSize(800, 0))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mainPlot.sizePolicy().hasHeightForWidth())
self.mainPlot.setSizePolicy(sizePolicy)
self.mainPlot.setObjectName("mainPlot")
self.splitter_2.addWidget(self.mainPlot)
oldMainPlot.setParent(None)
self.mainPlot.scene().sigMouseMoved.connect(self.reportMouseMoved)
self.verticalCursor = pg.InfiniteLine(angle=90, movable=False, pen=(0, 0, 0))
self.mainPlot.addItem(self.verticalCursor)
self.blankCursor = True
self.mainPlot.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.BlankCursor))
mouseSignal.connect(self.mouseEvent)
# Set up handler for clicks on data plot
self.mainPlot.scene().sigMouseClicked.connect(self.processClick)
self.mainPlotViewBox = self.mainPlot.getViewBox()
self.mainPlotViewBox.rbScaleBox.setPen(pg.mkPen((255, 0, 0), width=2))
self.mainPlotViewBox.rbScaleBox.setBrush(pg.mkBrush(None))
self.mainPlot.hideButtons()
self.mainPlot.showGrid(y=True, alpha=1.0)
self.extra = []
self.aperture_names = []
self.initializeTableView() # Mostly just establishes column headers
# Open (or create) file for holding 'sticky' stuff
self.settings = QSettings('simple-ote.ini', QSettings.IniFormat)
self.settings.setFallbacksEnabled(False)
# Use 'sticky' settings to size and position the main screen
self.resize(self.settings.value('size', QSize(800, 800)))
self.move(self.settings.value('pos', QPoint(50, 50)))
usediff = self.settings.value('usediff', 'true') == 'true'
self.enableDiffractionCalculationBox.setChecked(usediff)
doOCRcheck = self.settings.value('doOCRcheck', 'true') == 'true'
self.showOCRcheckFramesCheckBox.setChecked(doOCRcheck)
self.yValues = None
self.outliers = []
self.timeDelta = None
self.logFile = ''
self.left = None
self.right = None
self.selPts = []
self.initializeVariablesThatDontDependOnAfile()
self.pathToVideo = None
self.cascadePosition = None
self.cascadeDelta = 25
self.frameViews = []
self.fieldMode = False
self.d_underlying_lightcurve = None
self.r_underlying_lightcurve = None
self.d_candidates = None
self.d_candidate_entry_nums = None
self.r_candidates = None
self.r_candidate_entry_nums = None
self.b_intensity = None
self.a_intensity = None
self.penumbral_noise = None
self.penumbralDerrBar = None
self.penumbralRerrBar = None
self.lastDmetric = 0.0
self.lastRmetric = 0.0
self.xlsxDict = {}
self.checkForNewVersion()
self.copy_desktop_icon_file_to_home_directory()
self.only_new_solver_wanted = True
self.helperThing = HelpDialog()
if self.externalCsvFilePath is not None:
if os.path.exists(self.externalCsvFilePath):
self.showMsg(f'We will read: {self.externalCsvFilePath}')
self.readDataFromFile()
else:
self.showMsg(f'Could not find csv file specified: {self.externalCsvFilePath}')
self.externalCsvFilePath = None
def fillExcelReport(self):
# Open a file select dialog
xlsxfilepath, _ = QFileDialog.getOpenFileName(
self, # parent
"Select Asteroid Occultation Report form", # title for dialog
self.settings.value('lightcurvedir', ""), # starting directory
"Excel files (*.xlsx)")
if xlsxfilepath:
# noinspection PyBroadException
wb = load_workbook(xlsxfilepath)
try:
sheet = wb['DATA']
# Validate that a proper Asteroid Occultation Report Form was selected by reading the report header
if not sheet['G1'].value == 'Asteroid Occultation Report Form':
self.showMsg(f'The xlsx file selected does not appear to be an Asteroid Occultation Report Form')
return
# We're going to ignore the named cell info and reference all the cells of interest by
# their col/row coordinates (not all the cells of interest had names)
Derr68 = 'L33'
Derr95 = 'M33'
Derr99 = 'N33'
Rerr68 = 'L35'
Rerr95 = 'M35'
Rerr99 = 'N35'
Dhour = 'F32'
Dmin = 'H32'
Dsec = 'J32'
Rhour = 'F36'
Rmin = 'H36'
Rsec = 'J36'
# Exposure = 'P25'
OTA = 'O23'
SNR = 'W40'
Comment = 'D43'
sheet[OTA].value = 'PYOTE'
sheet[SNR].value = f'{self.snrB:0.2f}'
if 'Comment' in self.xlsxDict:
sheet[Comment].value = self.xlsxDict['Comment']
if 'Derr68' in self.xlsxDict:
# sheet[Derr68].value = f'{self.xlsxDict["Derr68"]:0.2f}'
sheet[Derr68].value = self.xlsxDict["Derr68"]
if 'Derr95' in self.xlsxDict:
# sheet[Derr95].value = f'{self.xlsxDict["Derr95"]:0.2f}'
sheet[Derr95].value = self.xlsxDict["Derr95"]
if 'Derr99' in self.xlsxDict:
# sheet[Derr99].value = f'{self.xlsxDict["Derr99"]:0.2f}'
sheet[Derr99].value = self.xlsxDict["Derr99"]
if 'Rerr68' in self.xlsxDict:
# sheet[Rerr68].value = f'{self.xlsxDict["Rerr68"]:0.2f}'
sheet[Rerr68].value = self.xlsxDict["Rerr68"]
if 'Rerr95' in self.xlsxDict:
# sheet[Rerr95].value = f'{self.xlsxDict["Rerr95"]:0.2f}'
sheet[Rerr95].value = self.xlsxDict["Rerr95"]
if 'Rerr99' in self.xlsxDict:
# sheet[Rerr99].value = f'{self.xlsxDict["Rerr99"]:0.2f}'
sheet[Rerr99].value = self.xlsxDict["Rerr99"]
if 'Dhour' in self.xlsxDict:
sheet[Dhour] = int(self.xlsxDict['Dhour'])
if 'Dmin' in self.xlsxDict:
sheet[Dmin] = int(self.xlsxDict['Dmin'])
if 'Dsec' in self.xlsxDict:
sheet[Dsec] = float(self.xlsxDict['Dsec'])
if 'Rhour' in self.xlsxDict:
sheet[Rhour] = int(self.xlsxDict['Rhour'])
if 'Rmin' in self.xlsxDict:
sheet[Rmin] = int(self.xlsxDict['Rmin'])
if 'Rsec' in self.xlsxDict:
sheet[Rsec] = float(self.xlsxDict['Rsec'])
# Overwriting the original file !!!
wb.save(xlsxfilepath)
except Exception as e:
self.showMsg(repr(e))
self.showMsg(f'FAILED to fill Asteroid Occultation Report Form', color='red', bold=True)
self.showMsg(f'Is it possible that you have the file already open somewhere?', color='red', bold=True)
return
self.showMsg(f'Excel spreadsheet Asteroid Report Form entries made successfully.')
# noinspection PyBroadException
try:
if platform.system() == 'Darwin':
subprocess.call(['open', xlsxfilepath])
elif platform.system() == 'Windows':
os.startfile(xlsxfilepath)
else:
subprocess.call(['xdg-open', xlsxfilepath])
except Exception as e:
self.showMsg('Attempt to get host OS to open xlsx file failed.', color='red', bold=True)
self.showMsg(repr(e))
# OS = sys.platform
# if OS == 'darwin' or OS == 'linux':
# subprocess.check_call(['open', xlsxfilepath])
# else:
# subprocess.check_call(['start', xlsxfilepath])
# Fill with our current values
else:
return
def validateLightcurveDataInput(self):
ans = {'success': True}
# Process exp dur entry
try:
exp_dur_str = self.expDurEdit.text().strip()
if not exp_dur_str:
ans.update({'exp_dur': None})
else:
exp_dur = float(exp_dur_str)
if exp_dur > 0.0:
ans.update({'exp_dur': exp_dur})
else:
self.showMsg(f'exposure duration must be > 0.0', bold=True)
ans.update({'exp_dur': None})
ans.update({'success': False})
except ValueError as e:
self.showMsg(f'{e}', bold=True)
ans.update({'exp_dur': None})
ans.update({'success': False})
# Process ast_dist entry
try:
ast_dist_str = self.asteroidDistanceEdit.text().strip()
if not ast_dist_str:
ans.update({'ast_dist': None})
else:
ast_dist = float(ast_dist_str)
if ast_dist > 0.0:
ans.update({'ast_dist': ast_dist})
else:
self.showMsg(f'ast_dist must be > 0.0', bold=True)
ans.update({'ast_dist': None})
ans.update({'success': False})
except ValueError as e:
self.showMsg(f'{e}', bold=True)
ans.update({'ast_dist': None})
ans.update({'success': False})
# Process shadow_speed entry
try:
shadow_speed_str = self.shadowSpeedEdit.text().strip()
if not shadow_speed_str:
ans.update({'shadow_speed': None})
else:
shadow_speed = float(shadow_speed_str)
if shadow_speed > 0.0:
ans.update({'shadow_speed': shadow_speed})
else:
self.showMsg(f'shadow speed must be > 0.0', bold=True)
ans.update({'shadow_speed': None})
ans.update({'success': False})
except ValueError as e:
self.showMsg(f'{e}', bold=True)
ans.update({'shadow_speed': None})
ans.update({'success': False})
# Process asteroid diameter
try:
ast_diam_str = self.astSizeEdit.text().strip()
if not ast_diam_str:
ans.update({'ast_diam': None})
else:
ast_diam = float(ast_diam_str)
if ast_diam > 0.0:
ans.update({'ast_diam': ast_diam})
else:
self.showMsg(f'asteroid diameter must be > 0.0 or missing', bold=True)
ans.update({'ast_diam': None})
ans.update({'success': False})
except ValueError as e:
self.showMsg(f'{e}', bold=True)
ans.update({'ast_diam': None})
ans.update({'success': False})
# Process centerline offset
try:
centerline_offset_str = self.pathOffsetEdit.text().strip()
if not centerline_offset_str:
ans.update({'centerline_offset': None})
else:
if ans['ast_diam'] is None:
ans.update({'centerline_offset': None})
ans.update({'success': False})
self.showMsg(f'centerline offset requires an asteroid diameter to be specified', bold=True)
else:
centerline_offset = float(centerline_offset_str)
if 0.0 <= centerline_offset < ans['ast_diam'] / 2:
ans.update({'centerline_offset': centerline_offset})
else:
self.showMsg(f'centerline offset must be positive and less than the asteroid radius', bold=True)
ans.update({'centerline_offset': None})
ans.update({'success': False})
except ValueError as e:
self.showMsg(f'{e}', bold=True)
ans.update({'centerline_offset': None})
ans.update({'success': False})
# Process star diam entry
try:
star_diam_str = self.starDiameterEdit.text().strip()
if not star_diam_str:
ans.update({'star_diam': None})
self.penumbralFitCheckBox.setChecked(False)
self.penumbralFitCheckBox.setEnabled(False)
else:
star_diam = float(star_diam_str)
if star_diam > 0.0:
ans.update({'star_diam': star_diam})
self.penumbralFitCheckBox.setEnabled(True)
else:
self.showMsg(f'star diameter must be > 0.0 or missing', bold=True)
ans.update({'star_diam': None})
ans.update({'success': False})
self.penumbralFitCheckBox.setChecked(False)
self.penumbralFitCheckBox.setEnabled(False)
except ValueError as e:
self.showMsg(f'{e}', bold=True)
ans.update({'star_diam': None})
ans.update({'success': False})
self.penumbralFitCheckBox.setChecked(False)
self.penumbralFitCheckBox.setEnabled(False)
# Process D limb angle entry
d_angle = self.dLimbAngle.value()
ans.update({'d_angle': d_angle})
# Process R limb angle entry
r_angle = self.rLimbAngle.value()
ans.update({'r_angle': r_angle})
return ans
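# Illustrative sketch (assumed example values) of the dict returned above:
#     {'success': True, 'exp_dur': 0.0334, 'ast_dist': 2.57, 'shadow_speed': 4.2,
#      'ast_diam': None, 'centerline_offset': None, 'star_diam': None,
#      'd_angle': 45.0, 'r_angle': 45.0}
# Blank edit boxes come back as None; any invalid entry also forces 'success' to False.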
# This method is needed because you cannot pass parameters from a clicked-connect
def demoClickedUnderlyingLightcurves(self):
if self.B is None:
self.demoUnderlyingLightcurves(baseline=100.0, event=0.0, plots_wanted=True, ignore_timedelta=True)
else:
self.demoUnderlyingLightcurves(baseline=self.B, event=self.A, plots_wanted=True, ignore_timedelta=True)
def demoUnderlyingLightcurves(self, baseline=None, event=None, plots_wanted=False, ignore_timedelta=False):
diff_table_name = f'diffraction-table.p'
diff_table_path = os.path.join(self.homeDir, diff_table_name)
ans = self.validateLightcurveDataInput()
if not ans['success']:
self.showInfo('There is a problem with the data entry.\n\nCheck log for details.')
return
if not ignore_timedelta:
if self.timeDelta is None or self.timeDelta < 0.001:
if ans['exp_dur'] is not None:
frame_time = ans['exp_dur']
else:
frame_time = 0.001
else:
frame_time = self.timeDelta
else:
if ans['exp_dur'] is not None:
frame_time = ans['exp_dur']
else:
frame_time = 0.001
if ans['exp_dur'] is not None and ans['ast_dist'] is None and ans['shadow_speed'] is None:
pass # User wants to ignore diffraction effects
else:
if self.enableDiffractionCalculationBox.isChecked() and \
(ans['ast_dist'] is None or ans['shadow_speed'] is None):
self.showMsg(f'Cannot compute diffraction curve without both ast distance and shadow speed!', bold=True)
return None
if ans['ast_dist'] is not None:
fresnel_length_at_500nm = fresnel_length_km(distance_AU=ans['ast_dist'], wavelength_nm=500.0)
if plots_wanted:
self.showMsg(f'Fresnel length @ 500nm: {fresnel_length_at_500nm:.4f} km', bold=True, color='green')
if ans['star_diam'] is not None and (ans['d_angle'] is None or ans['r_angle'] is None):
ans.update({'star_diam': None})
self.showMsg(f'An incomplete set of star parameters was entered --- treating star_diam as None!', bold=True)
elif ans['star_diam'] is not None and (ans['ast_dist'] is None or ans['shadow_speed'] is None):
ans.update({'star_diam': None})
self.showMsg(f'Need dist and shadow speed to utilize star diam --- treating star_diam as None!', bold=True)
# noinspection PyBroadException
try:
matplotlib.pyplot.close(self.d_underlying_lightcurve)
matplotlib.pyplot.close(self.r_underlying_lightcurve)
except Exception:
pass
self.d_underlying_lightcurve, self.r_underlying_lightcurve, ans = generate_underlying_lightcurve_plots(
diff_table_path=diff_table_path,
b_value=baseline,
a_value=event,
frame_time=frame_time,
ast_dist=ans['ast_dist'],
shadow_speed=ans['shadow_speed'],
ast_diam=ans['ast_diam'],
centerline_offset=ans['centerline_offset'],
star_diam=ans['star_diam'],
d_angle=ans['d_angle'],
r_angle=ans['r_angle'],
suppress_diffraction=not self.enableDiffractionCalculationBox.isChecked(),
title_addon=''
)
if plots_wanted:
self.d_underlying_lightcurve.show()
self.r_underlying_lightcurve.show()
else:
matplotlib.pyplot.close(self.d_underlying_lightcurve)
matplotlib.pyplot.close(self.r_underlying_lightcurve)
return ans
def findTimestampFromFrameNumber(self, frame):
# Currently PyMovie uses nn.00 for frame number
# Limovie uses nn.0 for frame number
# We use the 'starts with' flag so that we pick up both forms
items = self.table.findItems(f'{frame:0.1f}', QtCore.Qt.MatchStartsWith)
for item in items:
if item.column() == 0: # Avoid a possible match from a data column
ts = self.table.item(item.row(), 1).text()
return ts
return ''
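# Worked example of the 'starts with' match above: for frame 123 the search text is
# '123.0', which matches a PyMovie entry of '123.00' as well as a Limovie entry of
# '123.0'; the column check then discards any accidental hit in a data column.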
def showAnnotatedFrame(self, frame_to_show, annotation):
frame_number = frame_to_show
table_timestamp = self.findTimestampFromFrameNumber(frame_to_show)
if not table_timestamp:
table_timestamp = 'no timestamp found'
if self.pathToVideo is None:
return
_, ext = os.path.splitext(self.pathToVideo)
if ext == '.avi':
ans = readAviFile(frame_number, full_file_path=self.pathToVideo)
if not ans['success']:
self.showMsg(f'Attempt to view frame returned errmsg: {ans["errmsg"]}')
return
elif ext == '.ser':
ans = readSerFile(frame_number, full_file_path=self.pathToVideo)
if not ans['success']:
self.showMsg(f'Attempt to view frame returned errmsg: {ans["errmsg"]}')
return
elif ext == '':
# We assume it's a FITS folder that we have been given
ans = readFitsFile(frame_number, full_file_path=self.pathToVideo)
if not ans['success']:
self.showMsg(f'Attempt to view frame returned errmsg: {ans["errmsg"]}')
return
elif ext == '.aav':
ans = readAavFile(frame_number, full_file_path=self.pathToVideo)
if not ans['success']:
self.showMsg(f'Attempt to view frame returned errmsg: {ans["errmsg"]}')
return
else:
self.showMsg(f'Unsupported file extension: {ext}')
return
# Check to see if user has closed all frame views
frame_visible = False
for frame_view in self.frameViews:
if frame_view and frame_view.isVisible():
frame_visible = True
if not frame_visible:
self.cascadePosition = 100
title = f'{annotation} {table_timestamp} @ frame {frame_number}'
self.frameViews.append(pg.GraphicsWindow(title=title))
cascade_origin = self.pos() + QPoint(self.cascadePosition, self.cascadePosition)
self.frameViews[-1].move(cascade_origin)
self.cascadePosition += self.cascadeDelta
self.frameViews[-1].resize(1000, 600)
layout = QtGui.QGridLayout()
self.frameViews[-1].setLayout(layout)
imv = pg.ImageView()
layout.addWidget(imv, 0, 0)
imv.ui.menuBtn.hide()
imv.ui.roiBtn.hide()
image = ans['image']
if self.fieldViewCheckBox.isChecked():
upper_field = image[0::2, :]
lower_field = image[1::2, :]
image = np.concatenate((upper_field, lower_field))
if self.flipYaxisCheckBox.isChecked():
image = np.flipud(image)
if self.flipXaxisCheckBox.isChecked():
image = np.fliplr(image)
imv.setImage(image)
for i, frame_view in enumerate(self.frameViews):
if frame_view and not frame_view.isVisible():
# User has closed the image. Remove it so that garbage collection occurs.
self.frameViews[i].close()
self.frameViews[i] = None
else:
if frame_view:
frame_view.raise_()
def viewFrame(self):
if self.pathToVideo is None:
return
frame_to_show = self.frameNumSpinBox.value()
self.showAnnotatedFrame(frame_to_show=frame_to_show, annotation='User selected frame:')
def helpButtonClicked(self):
self.showHelp(self.helpButton)
def plotHelpButtonClicked(self):
self.showHelp(self.plotHelpButton)
def showHelp(self, obj):
if obj.toolTip():
self.helperThing.raise_()
self.helperThing.show()
self.helperThing.textEdit.clear()
self.helperThing.textEdit.insertHtml(obj.toolTip())
@staticmethod
def processKeystroke(event):
_ = event.key() # Just to satisfy PEP8
return False
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.KeyPress:
handled = self.processKeystroke(event)
if handled:
return True
else:
return super(SimplePlot, self).eventFilter(obj, event)
if event.type() == QtCore.QEvent.MouseButtonPress:
if event.button() == QtCore.Qt.RightButton:
if obj.toolTip():
self.helperThing.raise_()
self.helperThing.show()
self.helperThing.textEdit.clear()
self.helperThing.textEdit.insertHtml(obj.toolTip())
return True
return super(SimplePlot, self).eventFilter(obj, event)
# return False
if event.type() == QtCore.QEvent.ToolTip:
return True
return super(SimplePlot, self).eventFilter(obj, event)
# return False
def writeCSVfile(self):
_, name = os.path.split(self.filename)
name = self.removeCsvExtension(name)
name += '.PYOTE.csv'
myOptions = QFileDialog.Options()
# myOptions |= QFileDialog.DontConfirmOverwrite
myOptions |= QFileDialog.DontUseNativeDialog
myOptions |= QFileDialog.ShowDirsOnly
self.csvFile, _ = QFileDialog.getSaveFileName(
self, # parent
"Select directory/modify filename", # title for dialog
self.settings.value('lightcurvedir', "") + '/' + name, # starting directory
"", options=myOptions)
if self.csvFile:
with open(self.csvFile, 'w') as fileObject:
if not self.aperture_names:
fileObject.write('# ' + 'PYOTE ' + version.version() + '\n')
else:
fileObject.write('# PyMovie file written by ' + 'PYOTE ' + version.version() + '\n')
for hdr in self.headers:
fileObject.write('# ' + hdr)
if not self.aperture_names:
# Handle non-PyMovie csv file
columnHeadings = 'FrameNum,timeInfo,primaryData'
if len(self.LC2) > 0:
columnHeadings += ',LC2'
if len(self.LC3) > 0:
columnHeadings += ',LC3'
if len(self.LC4) > 0:
columnHeadings += ',LC4'
else:
columnHeadings = 'FrameNum,timeInfo'
for column_name in self.aperture_names:
columnHeadings += f',signal-{column_name}'
fileObject.write(columnHeadings + '\n')
for i in range(self.table.rowCount()):
if self.left <= i <= self.right:
line = self.table.item(i, 0).text()
for j in range(1, self.table.columnCount()):
# Deal with empty columns
if self.table.item(i, j) is not None:
line += ',' + self.table.item(i, j).text()
fileObject.write(line + '\n')
@staticmethod
def copy_desktop_icon_file_to_home_directory():
if platform.mac_ver()[0]:
icon_dest_path = f"{os.environ['HOME']}{r'/Desktop/run-pyote'}"
if not os.path.exists(icon_dest_path):
# Here is where the .bat file will be when running an installed pyote
icon_src_path = f"{os.environ['HOME']}" + r"/Anaconda3/Lib/site-packages/pyoteapp/run-pyote-mac.bat"
if not os.path.exists(icon_src_path):
# But here is where the .bat file is during a development run
icon_src_path = os.path.join(os.path.split(__file__)[0], 'run-pyote-mac.bat')
with open(icon_src_path) as src, open(icon_dest_path, 'w') as dest:
dest.writelines(src.readlines())
os.chmod(icon_dest_path, 0o755) # Make it executable
else:
# We must be on a Windows machine because Mac version number was empty
icon_dest_path = r"C:\Anaconda3\PYOTE.bat"
if not os.path.exists(icon_dest_path):
# Here is where the .bat file will be when running an installed pyote
icon_src_path = r"C:\Anaconda3\Lib\site-packages\pyoteapp\PYOTE.bat"
if not os.path.exists(icon_src_path):
# But here is where the .bat file is during a development run
icon_src_path = os.path.join(os.path.split(__file__)[0], 'PYOTE.bat')
with open(icon_src_path) as src, open(icon_dest_path, 'w') as dest:
dest.writelines(src.readlines())
def toggleManualEntryButton(self):
if self.manualTimestampCheckBox.isChecked():
self.manualEntryPushButton.setEnabled(True)
else:
self.manualEntryPushButton.setEnabled(False)
def openHelpFile(self):
helpFilePath = os.path.join(os.path.split(__file__)[0], 'pyote-info.pdf')
url = QtCore.QUrl.fromLocalFile(helpFilePath)
fileOpened = QtGui.QDesktopServices.openUrl(url)
if not fileOpened:
self.showMsg('Failed to open pyote-info.pdf', bold=True, color='red', blankLine=False)
self.showMsg('Location of pyote information file: ' + helpFilePath, bold=True, color='blue')
def mouseEvent(self):
if not self.blankCursor:
# self.showMsg('Mouse event')
self.blankCursor = True
self.mainPlot.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.BlankCursor))
def keyPressEvent(self, ev):
if ev.key() == QtCore.Qt.Key_Shift:
# self.showMsg('Shift key pressed')
if self.blankCursor:
self.mainPlot.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.blankCursor = False
else:
self.blankCursor = True
self.mainPlot.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.BlankCursor))
@staticmethod
def timestampListIsEmpty(alist):
ans = True
for item in alist:
# Limovie = '[::]' Tangra = '' R-OTE = '[NA]' or 'NA'
if item == '' or item == '[::]' or item == '[NA]' or item == 'NA':
pass
else:
ans = False
break
return ans
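# Illustrative examples (assumed inputs):
#     timestampListIsEmpty(['[::]', '', 'NA']) -> True (no real timestamps present)
#     timestampListIsEmpty(['[::]', '[01:02:03.4567]']) -> False (a real timestamp was found)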
def changeSecondary(self):
# Resolve curve-to-analyze and normalization-curve being the same
prim = self.curveToAnalyzeSpinBox.value()
norm = self.secondarySelector.value()
if prim == norm:
if prim == 1:
self.curveToAnalyzeSpinBox.setValue(2)
else:
self.curveToAnalyzeSpinBox.setValue(norm - 1)
selText = self.secondarySelector.text()
refNum = int(selText)
if self.aperture_names:
self.showMsg('Secondary reference ' + selText + ' selected. PyMovie aperture name: ' +
self.aperture_names[refNum - 1])
else:
self.showMsg('Secondary reference ' + selText + ' selected.')
if refNum == 1:
self.yRefStar = self.LC1
if refNum == 2:
self.yRefStar = self.LC2
if refNum == 3:
self.yRefStar = self.LC3
if refNum == 4:
self.yRefStar = self.LC4
if refNum > 4:
self.yRefStar = self.extra[refNum - 4 - 1]
self.smoothSecondary = []
self.reDrawMainPlot()
self.mainPlot.autoRange()
def changePrimary(self):
# Resolve curve-to-analyze and normalization-curve being the same
prim = self.curveToAnalyzeSpinBox.value()
norm = self.secondarySelector.value()
if prim == norm:
if norm == 1:
self.secondarySelector.setValue(2)
else:
self.secondarySelector.setValue(prim - 1)
selText = self.curveToAnalyzeSpinBox.text()
refNum = int(selText)
if self.aperture_names:
self.showMsg('Analyze light curve ' + selText + ' selected. PyMovie aperture name: ' +
self.aperture_names[refNum - 1])
else:
self.showMsg('Analyze light curve ' + selText + ' selected.')
if refNum == 1:
self.yValues = self.LC1
if refNum == 2:
self.yValues = self.LC2
if refNum == 3:
self.yValues = self.LC3
if refNum == 4:
self.yValues = self.LC4
if refNum > 4:
self.yValues = self.extra[refNum - 4 - 1].copy()
self.solution = None
self.reDrawMainPlot()
self.mainPlot.autoRange()
def installLatestVersion(self, pyoteversion):
self.showMsg(f'Asking to upgrade to: {pyoteversion}')
pipResult = upgradePyote(pyoteversion)
for line in pipResult:
self.showMsg(line, blankLine=False)
self.showMsg('', blankLine=False)
self.showMsg('The new version is installed but not yet running.', color='red', bold=True)
self.showMsg('Close and reopen pyote to start the new version running.', color='red', bold=True)
def checkForNewVersion(self):
gotVersion, latestVersion = getMostRecentVersionOfPyOTEViaJson()
if gotVersion:
if latestVersion <= version.version():
self.showMsg(f'Found that the latest version is: {latestVersion}')
self.showMsg('You are running the most recent version of PyOTE', color='red', bold=True)
else:
self.showMsg('Version ' + latestVersion + ' is available', color='red', bold=True)
if self.queryWhetherNewVersionShouldBeInstalled() == QMessageBox.Yes:
self.showMsg('You have opted to install latest version of PyOTE')
self.installLatestVersion(f'pyote=={latestVersion}')
else:
self.showMsg('You have declined the opportunity to install latest PyOTE')
else:
self.showMsg(f'latestVersion found: {latestVersion}')
@staticmethod
def queryWhetherNewVersionShouldBeInstalled():
msg = QMessageBox()
msg.setIcon(QMessageBox.Question)
msg.setText('A newer version of PyOTE is available. Do you wish to install it?')
msg.setWindowTitle('Get latest version of PyOTE query')
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
retval = msg.exec_()
return retval
@staticmethod
def queryWhetherBlockIntegrationShouldBeAcccepted():
msg = QMessageBox()
msg.setIcon(QMessageBox.Question)
msg.setText(
'Do you want the pyote estimation of block integration parameters to be used'
' for block integration?')
msg.setWindowTitle('Is auto-determined block integration ok?')
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
retval = msg.exec_()
return retval
def reportMouseMoved(self, pos):
# self.showMsg(str(pos.x()))
mousePoint = self.mainPlotViewBox.mapSceneToView(pos)
# self.showMsg(str(mousePoint.x()))
self.verticalCursor.setPos(round(mousePoint.x()))
def writeDefaultGraphicsPlots(self):
self.graphicFile, _ = os.path.splitext(self.filename)
exporter = FixedImageExporter(self.dBarPlotItem)
exporter.makeWidthHeightInts()
targetFileD = self.graphicFile + '.D.PYOTE.png'
exporter.export(targetFileD)
exporter = FixedImageExporter(self.durBarPlotItem)
exporter.makeWidthHeightInts()
targetFileDur = self.graphicFile + '.R-D.PYOTE.png'
exporter.export(targetFileDur)
exporter = FixedImageExporter(self.falsePositivePlotItem)
exporter.makeWidthHeightInts()
targetFileDur = self.graphicFile + '.false-positive.PYOTE.png'
exporter.export(targetFileDur)
exporter = FixedImageExporter(self.mainPlot.getPlotItem())
exporter.makeWidthHeightInts()
targetFile = self.graphicFile + '.PYOTE.png'
exporter.export(targetFile)
def exportBarPlots(self):
if self.dBarPlotItem is None:
self.showInfo('No error bar plots available yet')
return
_, name = os.path.split(self.filename)
name = self.removeCsvExtension(name)
myOptions = QFileDialog.Options()
myOptions |= QFileDialog.DontConfirmOverwrite
myOptions |= QFileDialog.DontUseNativeDialog
myOptions |= QFileDialog.ShowDirsOnly
self.graphicFile, _ = QFileDialog.getSaveFileName(
self, # parent
"Select directory/modify filename (png will be appended for you)", # title for dialog
self.settings.value('lightcurvedir', "") + '/' + name, # starting directory
# "csv files (*.csv)", options=myOptions)
"png files (*.png)", options=myOptions)
if self.graphicFile:
self.graphicFile = self.removeCsvExtension(self.graphicFile)
exporter = FixedImageExporter(self.dBarPlotItem)
exporter.makeWidthHeightInts()
targetFileD = self.graphicFile + '.D.PYOTE.png'
exporter.export(targetFileD)
exporter = FixedImageExporter(self.durBarPlotItem)
exporter.makeWidthHeightInts()
targetFileDur = self.graphicFile + '.R-D.PYOTE.png'
exporter.export(targetFileDur)
exporter = FixedImageExporter(self.falsePositivePlotItem)
exporter.makeWidthHeightInts()
targetFileDur = self.graphicFile + '.false-positive.PYOTE.png'
exporter.export(targetFileDur)
self.showInfo('Wrote to: \r\r' + targetFileD + ' \r\r' + targetFileDur)
@staticmethod
def removeCsvExtension(path):
base, ext = os.path.splitext(path)
if ext == '.csv':
return base
else:
return path
def exportGraphic(self):
_, name = os.path.split(self.filename)
name = self.removeCsvExtension(name)
myOptions = QFileDialog.Options()
myOptions |= QFileDialog.DontConfirmOverwrite
myOptions |= QFileDialog.DontUseNativeDialog
myOptions |= QFileDialog.ShowDirsOnly
self.graphicFile, _ = QFileDialog.getSaveFileName(
self, # parent
"Select directory/modify filename (png will be appended for you)", # title for dialog
self.settings.value('lightcurvedir', "") + '/' + name, # starting directory
"png files (*.png)", options=myOptions)
if self.graphicFile:
self.graphicFile = self.removeCsvExtension(self.graphicFile)
exporter = FixedImageExporter(self.mainPlot.getPlotItem())
exporter.makeWidthHeightInts()
targetFile = self.graphicFile + '.PYOTE.png'
exporter.export(targetFile)
self.showInfo('Wrote to: \r\r' + targetFile)
def initializeVariablesThatDontDependOnAfile(self):
self.left = None # Used during block integration
self.right = None # "
self.selPts = [] # "
self.penumbralFitCheckBox.setEnabled(False)
self.penumbralFitCheckBox.setChecked(False)
self.flashEdges = []
self.normalized = False
self.timesAreValid = True # until we find out otherwise
self.selectedPoints = {} # Clear/declare 'selected points' dictionary
self.baselineXvals = []
self.baselineYvals = []
self.underlyingLightcurveAns = None
self.solution = None
self.firstPassSolution = None
self.secondPassSolution = None
self.smoothSecondary = []
self.corCoefs = []
self.numPtsInCorCoefs = 0
self.Doffset = 1 # Offset (in readings) between D and 'start of exposure'
self.Roffset = 1 # Offset (in readings) between R and 'start of exposure'
self.sigmaB = None
self.sigmaA = None
self.A = None
self.B = None
self.snrB = None
self.snrA = None
self.dRegion = None
self.rRegion = None
self.dLimits = []
self.rLimits = []
self.minEvent = None
self.maxEvent = None
self.solution = None
self.eventType = 'none'
self.cancelRequested = False
self.deltaDlo68 = 0
self.deltaDlo95 = 0
self.deltaDhi68 = 0
self.deltaDhi95 = 0
self.deltaRlo68 = 0
self.deltaRlo95 = 0
self.deltaRhi68 = 0
self.deltaRhi95 = 0
self.deltaDurlo68 = 0
self.deltaDurlo95 = 0
self.deltaDurhi68 = 0
self.deltaDurhi95 = 0
self.plusD = None
self.minusD = None
self.plusR = None
self.minusR = None
self.dBarPlotItem = None
self.durBarPlotItem = None
self.errBarWin = None
def requestCancel(self):
self.cancelRequested = True
# The following line was just used to test uncaught exception handling
# raise Exception('The requestCancel devil made me do it')
def showDzone(self):
# If the user has not selected any points, we remove any dRegion that may
# have been present
if len(self.selectedPoints) == 0:
self.dRegion = None
self.dLimits = None
self.reDrawMainPlot()
return
if len(self.selectedPoints) != 2:
self.showInfo('Exactly two points must be selected for this operation.')
return
selIndices = [key for key, _ in self.selectedPoints.items()]
selIndices.sort()
leftEdge = int(min(selIndices))
rightEdge = int(max(selIndices))
if self.rLimits:
if rightEdge > self.rLimits[0] - 2: # Enforce at least 1 'a' point
rightEdge = self.rLimits[0] - 2
if self.rLimits[1] < self.right: # At least 1 'b' point is present
if leftEdge < self.left:
leftEdge = self.left
else:
if leftEdge < self.left + 1:
leftEdge = self.left + 1
else:
if rightEdge >= self.right - 1:
rightEdge = self.right - 1 # Enforce at least 1 'a' point
if leftEdge < self.left + 1:
leftEdge = self.left + 1 # Enforce at least 1 'b' point
if rightEdge < self.left or rightEdge <= leftEdge:
self.removePointSelections()
self.reDrawMainPlot()
return
self.setDataLimits.setEnabled(False)
if self.only_new_solver_wanted:
self.locateEvent.setEnabled(True)
self.dLimits = [leftEdge, rightEdge]
if self.rLimits:
self.DandR.setChecked(True)
else:
self.Donly.setChecked(True)
self.dRegion = pg.LinearRegionItem(
[leftEdge, rightEdge], movable=False, brush=(0, 200, 0, 50))
self.mainPlot.addItem(self.dRegion)
self.showMsg('D zone selected: ' + str([leftEdge, rightEdge]))
self.removePointSelections()
self.reDrawMainPlot()
def showRzone(self):
# If the user has not selected any points, we remove any rRegion that may
# have been present
if len(self.selectedPoints) == 0:
self.rRegion = None
self.rLimits = None
self.reDrawMainPlot()
return
if len(self.selectedPoints) != 2:
self.showInfo('Exactly two points must be selected for this operation.')
return
selIndices = [key for key, _ in self.selectedPoints.items()]
selIndices.sort()
leftEdge = int(min(selIndices))
rightEdge = int(max(selIndices))
if self.dLimits:
if leftEdge < self.dLimits[1] + 2:
leftEdge = self.dLimits[1] + 2 # Enforce at least 1 'a' point
if self.dLimits[0] == self.left:
if rightEdge >= self.right:
rightEdge = self.right - 1 # Enforce at least 1 'b' point
else:
if rightEdge >= self.right:
rightEdge = self.right
else:
if rightEdge >= self.right - 1:
rightEdge = self.right - 1 # Enforce 1 'a' (for r-only search)
if leftEdge < self.left + 1:
leftEdge = self.left + 1 # Enforce 1 'b' point
if rightEdge <= leftEdge:
self.removePointSelections()
self.reDrawMainPlot()
return
self.setDataLimits.setEnabled(False)
if self.only_new_solver_wanted:
self.locateEvent.setEnabled(True)
self.rLimits = [leftEdge, rightEdge]
if self.dLimits:
self.DandR.setChecked(True)
else:
self.Ronly.setChecked(True)
self.rRegion = pg.LinearRegionItem(
[leftEdge, rightEdge], movable=False, brush=(200, 0, 0, 50))
self.mainPlot.addItem(self.rRegion)
self.showMsg('R zone selected: ' + str([leftEdge, rightEdge]))
self.removePointSelections()
self.reDrawMainPlot()
def calculateFlashREdge(self):
if len(self.selectedPoints) != 2:
self.showInfo(
'Exactly two points must be selected for this operation.')
return
selIndices = [key for key, _ in self.selectedPoints.items()]
selIndices.sort()
savedLeft = self.left
savedRight = self.right
leftEdge = int(min(selIndices))
rightEdge = int(max(selIndices))
self.left = leftEdge
self.right = rightEdge
if self.dLimits:
if leftEdge < self.dLimits[1] + 2:
leftEdge = self.dLimits[1] + 2 # Enforce at least 1 'a' point
if self.dLimits[0] == self.left:
if rightEdge >= self.right:
rightEdge = self.right - 1 # Enforce at least 1 'b' point
else:
if rightEdge >= self.right:
rightEdge = self.right
else:
if rightEdge >= self.right - 1:
rightEdge = self.right - 1 # Enforce 1 'a' (for r-only search)
if leftEdge < self.left + 1:
leftEdge = self.left + 1 # Enforce 1 'b' point
if rightEdge <= leftEdge:
self.removePointSelections()
self.reDrawMainPlot()
return
if self.only_new_solver_wanted:
self.locateEvent.setEnabled(True)
self.rLimits = [leftEdge, rightEdge]
if self.dLimits:
self.DandR.setChecked(True)
else:
self.Ronly.setChecked(True)
self.rRegion = pg.LinearRegionItem(
[leftEdge, rightEdge], movable=False, brush=(200, 0, 0, 50))
self.mainPlot.addItem(self.rRegion)
self.showMsg('R zone selected: ' + str([leftEdge, rightEdge]))
self.removePointSelections()
self.reDrawMainPlot()
self.findEvent()
self.left = savedLeft
self.right = savedRight
self.reDrawMainPlot()
if self.solution:
frameDelta = float(self.yFrame[1]) - float(self.yFrame[0])
frameZero = float(self.yFrame[0])
flashFrame = self.solution[1] * frameDelta + frameZero
# self.flashEdges.append(self.solution[1] + float(self.yFrame[0]))
self.flashEdges.append(flashFrame)
self.flashEdges[-1] = '%0.2f' % self.flashEdges[-1]
msg = 'flash edges (in frame units): %s' % str(self.flashEdges)
self.showMsg(msg, bold=True, color='red')
def normalize(self):
if len(self.selectedPoints) != 1:
self.showInfo('A single point must be selected for this operation. ' +
'That point will retain its value while all other points ' +
'are scaled (normalized) around it.')
return
selIndices = [key for key, value in self.selectedPoints.items()]
index = selIndices[0]
# self.showMsg('Index: ' + str(index) )
# Reminder: the smoothSecondary[] array only covers self.left to self.right inclusive,
# hence the index manipulation in the following code
ref = self.smoothSecondary[int(index)-self.left]
for i in range(self.left, self.right+1):
try:
self.yValues[i] = (ref * self.yValues[i]) / self.smoothSecondary[i-self.left]
except Exception as e:
self.showMsg(str(e))
self.fillTableViewOfData() # This should capture/write the effects of the normalization to the table
self.showMsg('Light curve normalized to secondary around point ' + str(index))
self.normalized = True
self.removePointSelections()
self.normalizeButton.setEnabled(False)
self.smoothSecondaryButton.setEnabled(False)
self.numSmoothPointsEdit.setEnabled(False)
self.setDataLimits.setEnabled(False)
self.showSecondaryCheckBox.setChecked(False)
self.reDrawMainPlot()
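# Worked example of the scaling above (assumed numbers): if the smoothed reference is
# 1000 at the selected point (ref) and 1100 at reading i, then
#     yValues[i] = (1000 * yValues[i]) / 1100
# so that reading is scaled down by about 9 percent while the selected point keeps its value.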
def smoothRefStar(self):
if (self.right - self.left) < 4:
self.showInfo('The smoothing algorithm requires a minimum selection of 5 points')
return
y = [self.yRefStar[i] for i in range(self.left, self.right+1)]
userSpecedWindow = 101
numPts = self.numSmoothPointsEdit.text().strip()
if numPts:
if not numPts.isnumeric():
self.showInfo('Invalid entry for smoothing window size - defaulting to 101')
else:
userSpecedWindow = int(numPts)
if userSpecedWindow < 5:
self.showInfo('smoothing window must be size 5 or greater - defaulting to 101')
userSpecedWindow = 101
window = None
try:
if len(y) > userSpecedWindow:
window = userSpecedWindow
else:
window = len(y)
# Enforce the odd window size required by savgol_filter()
if window % 2 == 0:
window -= 1
# We do a double pass with a third order savgol filter
filteredY = scipy.signal.savgol_filter(np.array(y), window, 3)
self.smoothSecondary = scipy.signal.savgol_filter(filteredY, window, 3)
# New in version 3.7.2: we remove the extrapolated points at each end of self.smoothSecondary
self.extra_point_count = window // 2
self.selectedPoints = {self.left + self.extra_point_count: 3,
self.right - self.extra_point_count: 3}
saved_smoothSecondary = self.smoothSecondary
self.doTrim()
self.smoothSecondary = saved_smoothSecondary
self.smoothSecondary = self.smoothSecondary[self.extra_point_count:-self.extra_point_count]
# self.left += self.extra_point_count
# self.right -= self.extra_point_count
self.reDrawMainPlot()
except Exception as e:
self.showMsg(str(e))
self.showMsg('Smoothing of secondary star light curve performed with window size: %i' % window)
self.normalizeButton.setEnabled(True)
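# Worked example of the window handling above (assumed numbers): with 400 selected points
# and the default window of 101, savgol_filter() is applied twice with window 101, then
# extra_point_count = 101 // 2 = 50 readings are trimmed from each end of the smoothed
# reference (and the selection) before it is used for normalization.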
def toggleDisplayOfTimestampErrors(self):
self.reDrawMainPlot()
self.mainPlot.autoRange()
def toggleDisplayOfSecondaryStar(self):
if self.showSecondaryCheckBox.isChecked():
self.secondarySelector.setEnabled(True)
else:
self.secondarySelector.setEnabled(False)
if self.showSecondaryCheckBox.isChecked():
self.changeSecondary()
else:
self.reDrawMainPlot()
self.mainPlot.autoRange()
def showInfo(self, stuffToSay):
QMessageBox.information(self, 'General information', stuffToSay)
def showQuery(self, question, title=''):
msgBox = QMessageBox(self)
msgBox.setIcon(QMessageBox.Question)
msgBox.setText(question)
msgBox.setWindowTitle(title)
msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
msgBox.setDefaultButton(QMessageBox.Yes)
self.queryRetVal = msgBox.exec_()
def fillPrimaryAndRef(self):
# Load self.yValues and self.yRefStar with the proper light curves as
# indicated by the spinner values
# Get indices of selected primary and reference light curves
primary = self.curveToAnalyzeSpinBox.value()
reference = self.secondarySelector.value()
if primary == 1:
self.yValues = self.LC1
elif primary == 2:
self.yValues = self.LC2
elif primary == 3:
self.yValues = self.LC3
elif primary == 4:
self.yValues = self.LC4
else:
self.yValues = self.extra[primary - 5]
if primary == reference:
if reference == 1:
self.yRefStar = self.LC1
elif reference == 2:
self.yRefStar = self.LC2
elif reference == 3:
self.yRefStar = self.LC3
elif reference == 4:
self.yRefStar = self.LC4
else:
self.yRefStar = self.extra[reference - 5]
# noinspection PyUnusedLocal
self.yStatus = [1 for _i in range(self.dataLen)]
def doIntegration(self):
if len(self.selectedPoints) == 0:
self.showMsg('Analysis of all possible block integration sizes and offsets',
color='red', bold=True)
notchList = []
kList = []
offsetList = []
self.progressBar.setValue(0)
progress = 0
integrationSizes = [2, 4, 8, 16, 32, 48, 64, 96, 128, 256]
for k in integrationSizes:
kList.append(k)
ans = mean_std_versus_offset(k, self.yValues)
progress += 1
self.progressBar.setValue((progress / len(integrationSizes)) * 100)
QtGui.QApplication.processEvents()
offsetList.append(np.argmin(ans))
median = np.median(ans)
notch = np.min(ans) / median
notchList.append(notch)
s = '%3d notch %0.2f [' % (k, notch)
for item in ans:
s = s + '%0.1f, ' % item
self.showMsg(s[:-2] + ']', blankLine=False)
QtGui.QApplication.processEvents()
self.progressBar.setValue(0)
QtGui.QApplication.processEvents()
best = int(np.argmin(notchList))
blockSize = kList[best]
offset = int(offsetList[best])
self.showMsg(' ', blankLine=False)
s = '\r\nBest integration estimate: blockSize: %d @ offset %d' % (blockSize, offset)
self.showMsg(s, color='red', bold=True)
brush1 = (0, 200, 0, 70)
brush2 = (200, 0, 0, 70)
leftEdge = offset - 0.5
rightEdge = leftEdge + blockSize
bFlag = True
while rightEdge <= len(self.yValues):
if bFlag:
bFlag = False
brushToUse = brush2
else:
bFlag = True
brushToUse = brush1
if bFlag:
self.mainPlot.addItem(pg.LinearRegionItem([leftEdge, rightEdge],
movable=False, brush=brushToUse))
leftEdge += blockSize
rightEdge += blockSize
# Set the integration selection point indices
self.bint_left = offset
self.bint_right = offset + blockSize - 1
self.selPts = [self.bint_left, self.bint_right]
self.acceptBlockIntegration.setEnabled(True)
elif len(self.selectedPoints) != 2:
self.showInfo('Exactly two points must be selected for a block integration')
return
else:
self.bint_left = None # Force use of selectPoints in applyIntegration()
# self.acceptBlockIntegration.setEnabled(False)
self.applyIntegration()
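# Sketch of the block-size metric used above (assumed numbers): for each candidate block
# size k, mean_std_versus_offset() returns one noise figure per candidate offset, and
#     notch = np.min(ans) / np.median(ans)
# e.g. a minimum of 2.1 against a median of 6.3 gives a notch of 0.33. The (blockSize,
# offset) pair with the smallest notch is reported as the best integration estimate.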
def applyIntegration(self):
if self.bint_left is None:
if self.outliers:
self.showInfo('This data set contains some erroneous time steps, which have ' +
'been marked with red lines. Best practice is to ' +
'choose an integration block that is ' +
'positioned in an unmarked region, hopefully containing ' +
'the "event". Block integration ' +
'proceeds to the left and then to the right of the marked block.')
self.selPts = [key for key in self.selectedPoints.keys()]
self.removePointSelections()
self.bint_left = min(self.selPts)
self.bint_right = max(self.selPts)
# Time to do the work
p0 = self.bint_left
span = self.bint_right - self.bint_left + 1 # Number of points in integration block
self.blockSize = span
newFrame = []
newTime = []
newLC1 = []
newLC2 = []
newLC3 = []
newLC4 = []
newExtra = [[] for _ in range(len(self.extra))]
if not self.blockSize % 2 == 0:
self.showInfo(f'Blocksize is {self.blockSize}\n\nAn odd number for blocksize is likely an error!')
p = p0 - span # Start working toward the left
while p > 0:
avg = np.mean(self.LC1[p:(p+span)])
newLC1.insert(0, avg)
if len(self.LC2) > 0:
avg = np.mean(self.LC2[p:(p+span)])
newLC2.insert(0, avg)
if len(self.LC3) > 0:
avg = np.mean(self.LC3[p:(p+span)])
newLC3.insert(0, avg)
if len(self.LC4) > 0:
avg = np.mean(self.LC4[p:(p+span)])
newLC4.insert(0, avg)
if len(newExtra) > 0:
for k, lc in enumerate(self.extra):
avg = np.mean(lc[p:(p+span)])
newExtra[k].insert(0, avg)
newFrame.insert(0, self.yFrame[p])
newTime.insert(0, self.yTimes[p])
p = p - span
p = p0 # Start working toward the right
while p < self.dataLen - span:
avg = np.mean(self.LC1[p:(p+span)])
newLC1.append(avg)
if len(self.LC2) > 0:
avg = np.mean(self.LC2[p:(p + span)])
newLC2.append(avg)
if len(self.LC3) > 0:
avg = np.mean(self.LC3[p:(p + span)])
newLC3.append(avg)
if len(self.LC4) > 0:
avg = np.mean(self.LC4[p:(p + span)])
newLC4.append(avg)
if len(newExtra) > 0:
for k, lc in enumerate(self.extra):
avg = np.mean(lc[p:(p + span)])
newExtra[k].append(avg)
newFrame.append(self.yFrame[p])
newTime.append(self.yTimes[p])
p = p + span
self.dataLen = len(newLC1)
self.LC1 = np.array(newLC1)
self.LC2 = np.array(newLC2)
self.LC3 = np.array(newLC3)
self.LC4 = np.array(newLC4)
if len(newExtra) > 0:
for k in range(len(newExtra)):
self.extra[k] = np.array(newExtra[k])
# auto-select all points
self.left = 0
self.right = self.dataLen - 1
self.fillPrimaryAndRef()
self.yTimes = newTime[:]
self.yFrame = newFrame[:]
self.fillTableViewOfData()
self.selPts.sort()
self.showMsg('Block integration started at entry ' + str(self.selPts[0]) +
' with block size of ' + str(self.selPts[1]-self.selPts[0]+1) + ' readings')
self.timeDelta, self.outliers, self.errRate = getTimeStepAndOutliers(self.yTimes)
self.showMsg('timeDelta: ' + fp.to_precision(self.timeDelta, 6) + ' seconds per block', blankLine=False)
self.showMsg('timestamp error rate: ' + fp.to_precision(100 * self.errRate, 2) + '%')
self.expDurEdit.setText(fp.to_precision(self.timeDelta, 6))
self.illustrateTimestampOutliers()
self.doBlockIntegration.setEnabled(False)
self.acceptBlockIntegration.setEnabled(False)
if self.showSecondaryCheckBox.isChecked():
self.changeSecondary()
self.reDrawMainPlot()
self.mainPlot.autoRange()
def togglePointSelected(self, index):
if self.yStatus[index] != 3:
# Save current status for possible undo (a later click)
self.selectedPoints[index] = self.yStatus[index]
self.yStatus[index] = 3 # Set color to 'selected'
else:
# Restore previous status (when originally clicked)
self.yStatus[index] = self.selectedPoints[index]
del (self.selectedPoints[index])
self.reDrawMainPlot() # Redraw plot to show selection change
def processClick(self, event):
# Don't allow mouse clicks to select points unless the cursor is blank
if self.blankCursor:
# This try/except handles case where user clicks in plot area before a
# plot has been drawn.
try:
mousePoint = self.mainPlotViewBox.mapSceneToView(event.scenePos())
index = round(mousePoint.x())
if index in range(self.dataLen):
if event.button() == 1: # left button clicked?
if index < self.left:
index = self.left
if index > self.right:
index = self.right
self.togglePointSelected(index)
self.acceptBlockIntegration.setEnabled(False)
# Move the table view of data so that clicked point data is visible
self.table.setCurrentCell(index, 0)
else:
pass # Out of bounds clicks simply ignored
except AttributeError:
pass
def initializeTableView(self):
self.table.clear()
self.table.setRowCount(3)
if not self.aperture_names:
# Handle non-PyMovie csv file
colLabels = ['FrameNum', 'timeInfo', 'LC1', 'LC2', 'LC3', 'LC4']
self.table.setColumnCount(6)
else:
self.table.setColumnCount(2 + len(self.aperture_names))
colLabels = ['FrameNum', 'timeInfo']
for column_name in self.aperture_names:
colLabels.append(column_name)
# if len(self.extra) > 0:
# for i in range(len(self.extra)):
# colLabels.append(f'LC{i+5}')
self.table.setHorizontalHeaderLabels(colLabels)
def closeEvent(self, event):
# Capture the close request and update 'sticky' settings
self.settings.setValue('size', self.size())
self.settings.setValue('pos', self.pos())
self.settings.setValue('usediff', self.enableDiffractionCalculationBox.isChecked())
self.settings.setValue('doOCRcheck', self.showOCRcheckFramesCheckBox.isChecked())
self.helperThing.close()
if self.d_underlying_lightcurve:
matplotlib.pyplot.close(self.d_underlying_lightcurve)
if self.r_underlying_lightcurve:
matplotlib.pyplot.close(self.r_underlying_lightcurve)
for frame_view in self.frameViews:
if frame_view:
frame_view.close()
curDateTime = datetime.datetime.today().ctime()
self.showMsg('')
self.showMsg('#' * 20 + ' Session ended: ' + curDateTime + ' ' + '#' * 20)
if self.errBarWin:
self.errBarWin.close()
event.accept()
def rowClick(self, row):
self.highlightReading(row)
def cellClick(self, row):
self.togglePointSelected(row)
def highlightReading(self, rdgNum):
x = [rdgNum]
y = [self.yValues[rdgNum]]
self.reDrawMainPlot()
self.mainPlot.plot(x, y, pen=None, symbol='o', symbolPen=(255, 0, 0),
symbolBrush=(255, 255, 0), symbolSize=10)
def showMsg(self, msg, color=None, bold=False, blankLine=True):
""" show standard output message """
htmlmsg = msg
if color:
htmlmsg = '<font color=' + color + '>' + htmlmsg + '</font>'
if bold:
htmlmsg = '<b>' + htmlmsg + '</b>'
htmlmsg = htmlmsg + '<br>'
self.textOut.moveCursor(QtGui.QTextCursor.End)
self.textOut.insertHtml(htmlmsg)
if blankLine:
self.textOut.insertHtml('<br>')
self.textOut.ensureCursorVisible()
if self.logFile:
fileObject = open(self.logFile, 'a')
fileObject.write(msg + '\n')
if blankLine:
fileObject.write('\n')
fileObject.close()
def reportSpecialProcedureUsed(self):
if self.blockSize == 1:
self.showMsg('This light curve has not been block integrated.',
color='blue', bold=True, blankLine=False)
else:
self.showMsg('Block integration of size %d has been applied to '
'this light curve.' %
self.blockSize, color='blue', bold=True, blankLine=False)
if self.normalized:
self.showMsg('This light curve has been normalized against a '
'reference star.',
color='blue', bold=True, blankLine=False)
if not (self.left == 0 and self.right == self.dataLen - 1):
self.showMsg('This light curve has been trimmed.',
color='blue', bold=True, blankLine=False)
self.showMsg('', blankLine=False)
ans = self.validateLightcurveDataInput()
if ans['success']:
self.showMsg(f'The following lightcurve parameters were utilized:',
color='blue', bold=True)
if self.enableDiffractionCalculationBox.isChecked():
self.showMsg(f"==== use diff: is checked", bold=True, blankLine=False)
else:
self.showMsg(f"==== use diff: is NOT checked", bold=True, blankLine=False)
if ans['exp_dur'] is not None:
self.showMsg(f"==== exp: {ans['exp_dur']:0.6f}", bold=True, blankLine=False)
if ans['ast_dist'] is not None:
self.showMsg(f"==== dist(AU): {ans['ast_dist']:0.4f}", bold=True, blankLine=False)
if ans['shadow_speed'] is not None:
self.showMsg(f"==== speed(km/sec): {ans['shadow_speed']:0.4f}", bold=True, blankLine=False)
if ans['star_diam'] is not None:
self.showMsg(f"==== Star diam(mas): {ans['star_diam']:0.4f}", bold=True, blankLine=False)
if ans['d_angle'] is not None:
self.showMsg(f"==== D limb angle: {ans['d_angle']:0.1f}", bold=True, blankLine=False)
if ans['r_angle'] is not None:
self.showMsg(f"==== R limb angle: {ans['r_angle']:0.1f}", bold=True, blankLine=False)
else:
self.showMsg(f'Some invalid entries were found in the lightcurve parameters panel',
color='blue', bold=True, blankLine=False)
self.showMsg('', blankLine=False)
def Dreport(self, deltaDhi, deltaDlo):
D, _ = self.solution
intD = int(D) # So that we can do lookup in the data table
noiseAsymmetry = self.snrA / self.snrB
if (noiseAsymmetry > 0.7) and (noiseAsymmetry < 1.3):
plusD = (deltaDhi - deltaDlo) / 2
minusD = plusD
else:
plusD = deltaDhi
minusD = -deltaDlo
# Save these for the 'envelope' plotter
self.plusD = plusD
self.minusD = minusD
frameNum = float(self.yFrame[intD])
Dframe = (D - intD) * self.framesPerEntry() + frameNum
self.showMsg('D: %.2f {+%.2f,-%.2f} (frame number)' % (Dframe, plusD * self.framesPerEntry(),
minusD * self.framesPerEntry()),
blankLine=False)
ts = self.yTimes[int(D)]
time = convertTimeStringToTime(ts)
adjTime = time + (D - int(D)) * self.timeDelta
ts = convertTimeToTimeString(adjTime)
self.showMsg('D: %s {+%.4f,-%.4f} seconds' %
(ts, plusD * self.timeDelta, minusD * self.timeDelta)
)
return adjTime
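# Worked example of the sub-reading interpolation above (assumed numbers): with
# D = 150.25 and timeDelta = 0.5343 s, the timestamp of reading 150 is advanced by
# 0.25 * 0.5343 s (about 0.134 s) before being reported as the D time.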
def Rreport(self, deltaRhi, deltaRlo):
_, R = self.solution
# if R: R = R - self.Roffset
noiseAsymmetry = self.snrA / self.snrB
if (noiseAsymmetry > 0.7) and (noiseAsymmetry < 1.3):
plusR = (deltaRhi - deltaRlo) / 2
minusR = plusR
else:
plusR = -deltaRlo # Deliberate 'inversion'
minusR = deltaRhi # Deliberate 'inversion'
# Save these for the 'envelope' plotter
self.plusR = plusR
self.minusR = minusR
intR = int(R)
frameNum = float(self.yFrame[intR])
Rframe = (R - intR) * self.framesPerEntry() + frameNum
self.showMsg('R: %.2f {+%.2f,-%.2f} (frame number)' % (Rframe, plusR * self.framesPerEntry(),
minusR * self.framesPerEntry()),
blankLine=False)
ts = self.yTimes[int(R)]
time = convertTimeStringToTime(ts)
adjTime = time + (R - int(R)) * self.timeDelta
ts = convertTimeToTimeString(adjTime)
self.showMsg('R: %s {+%.4f,-%.4f} seconds' %
(ts, plusR * self.timeDelta, minusR * self.timeDelta)
)
return adjTime
def confidenceIntervalReport(self, numSigmas, deltaDurhi, deltaDurlo, deltaDhi, deltaDlo,
deltaRhi, deltaRlo):
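        """Write a confidence-interval report for B, A, magDrop, snr, D, R and duration.

        numSigmas selects the interval (1, 2, or 3 sigma, i.e. 0.68 / 0.95 /
        0.9973); the delta* arguments are the corresponding error-bar limits
        taken from the Monte Carlo distributions built in computeErrorBars().
        """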
D, R = self.solution
self.showMsg('B: %0.2f {+/- %0.2f}' % (self.B, numSigmas * self.sigmaB / np.sqrt(self.nBpts)))
self.showMsg('A: %0.2f {+/- %0.2f}' % (self.A, numSigmas * self.sigmaA / np.sqrt(self.nApts)))
self.magdropReport(numSigmas)
self.showMsg('snr: %0.2f' % self.snrB)
if self.eventType == 'Donly':
self.Dreport(deltaDhi, deltaDlo)
elif self.eventType == 'Ronly':
self.Rreport(deltaRhi, deltaRlo)
elif self.eventType == 'DandR':
Dtime = self.Dreport(deltaDhi, deltaDlo)
            Rtime = self.Rreport(deltaRhi, deltaRlo)
plusDur = ((deltaDurhi - deltaDurlo) / 2)
minusDur = plusDur
self.showMsg('Duration (R - D): %.4f {+%.4f,-%.4f} readings' %
((R - D) * self.framesPerEntry(),
plusDur * self.framesPerEntry(), minusDur * self.framesPerEntry()),
blankLine=False)
plusDur = ((deltaDurhi - deltaDurlo) / 2) * self.timeDelta
minusDur = plusDur
self.showMsg('Duration (R - D): %.4f {+%.4f,-%.4f} seconds' %
(Rtime - Dtime, plusDur, minusDur))
def penumbralConfidenceIntervalReport(self, numSigmas, deltaDurhi, deltaDurlo, deltaDhi, deltaDlo,
deltaRhi, deltaRlo):
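        """Penumbral-fit variant of confidenceIntervalReport() (same report layout)."""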
D, R = self.solution
self.showMsg('B: %0.2f {+/- %0.2f}' % (self.B, numSigmas * self.sigmaB / np.sqrt(self.nBpts)))
self.showMsg('A: %0.2f {+/- %0.2f}' % (self.A, numSigmas * self.sigmaA / np.sqrt(self.nApts)))
self.magdropReport(numSigmas)
self.showMsg('snr: %0.2f' % self.snrB)
if self.eventType == 'Donly':
self.Dreport(deltaDhi, deltaDlo)
elif self.eventType == 'Ronly':
self.Rreport(deltaRhi, deltaRlo)
elif self.eventType == 'DandR':
Dtime = self.Dreport(deltaDhi, deltaDlo)
            Rtime = self.Rreport(deltaRhi, deltaRlo)
plusDur = ((deltaDurhi - deltaDurlo) / 2)
minusDur = plusDur
self.showMsg('Duration (R - D): %.4f {+%.4f,-%.4f} readings' %
((R - D) * self.framesPerEntry(),
plusDur * self.framesPerEntry(), minusDur * self.framesPerEntry()),
blankLine=False)
plusDur = ((deltaDurhi - deltaDurlo) / 2) * self.timeDelta
minusDur = plusDur
self.showMsg('Duration (R - D): %.4f {+%.4f,-%.4f} seconds' %
(Rtime - Dtime, plusDur, minusDur))
def magDropString(self, B, A):
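        """Return the magnitude drop implied by baseline level B and event level A.

        magDrop = 2.5 * (log10(B) - log10(A))

        Illustrative example (made-up values): B = 1000, A = 100 gives
        2.5 * (3 - 2) = 2.50 magnitudes. When A falls below the standard error
        of A, only a limiting value ('> x.xx') can be quoted; degenerate cases
        (B <= 0, A > B, std(A) > B) return an 'NA ...' explanation instead.
        """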
stdA = self.sigmaA / np.sqrt(self.nApts)
if not B > 0:
return 'NA because B is not greater than 0'
if A > B:
return 'NA because A is greater than B'
if A < stdA: # We're in limiting magDrop region when A is less than stdA
if stdA > B:
return 'NA because std(A) is greater than B'
else:
return f'> {(np.log10(B) - np.log10(stdA)) * 2.5:0.2f}'
else:
            # Normal case: B > 0 and stdA <= A <= B
return f'{(np.log10(B) - np.log10(A)) * 2.5:0.2f}'
def magdropReport(self, numSigmas):
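        """Report minimum/nominal/maximum magDrop at the numSigmas level.

        The extremes are formed by moving B and A in opposite directions by
        numSigmas standard errors (sigma / sqrt(n)) before calling
        magDropString().
        """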
Adelta = numSigmas * self.sigmaA / np.sqrt(self.nApts)
Amin = self.A - Adelta
Anom = self.A
Amax = self.A + Adelta
Bdelta = numSigmas * self.sigmaB / np.sqrt(self.nBpts)
Bmin = self.B - Bdelta
Bnom = self.B
Bmax = self.B + Bdelta
self.showMsg(f'minimum magDrop: {self.magDropString(Bmin, Amax)}')
self.showMsg(f'nominal magDrop: {self.magDropString(Bnom, Anom)}')
self.showMsg(f'maximum magDrop: {self.magDropString(Bmax, Amin)}')
def finalReportPenumbral(self):
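        """Produce the full report for a penumbral-fit solution.

        The penumbral D/R error bars are treated as 2-sigma (0.95 ci) values;
        the 0.68 and 0.9973 limits are scaled from them, duration uncertainties
        are combined in quadrature, and confidence-interval and Excel summary
        sections analogous to finalReport() are written.
        """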
self.displaySolution(True)
        self.minusD = self.plusD = self.penumbralDerrBar  # This is the 2 sigma (95% ci) value
self.minusR = self.plusR = self.penumbralRerrBar
self.drawEnvelope() # Shows error bars at the 95% ci level
self.deltaDlo68 = self.deltaDhi68 = self.plusD / 2.0
self.deltaDlo95 = self.deltaDhi95 = self.plusD
self.deltaDlo99 = self.deltaDhi99 = 3.0 * self.plusD / 2.0
self.deltaRlo68 = self.deltaRhi68 = self.plusR / 2.0
self.deltaRlo95 = self.deltaRhi95 = self.plusR
self.deltaRlo99 = self.deltaRhi99 = 3.0 * self.plusR / 2.0
self.deltaDurhi68 = np.sqrt(self.deltaDhi68**2 + self.deltaRhi68**2)
self.deltaDurlo68 = - self.deltaDurhi68
self.deltaDurhi95 = 2.0 * self.deltaDurhi68
self.deltaDurlo95 = - self.deltaDurhi95
self.deltaDurhi99 = 3.0 * self.deltaDurhi68
self.deltaDurlo99 = - self.deltaDurhi99
# Grab the D and R values found and apply our timing convention
D, R = self.solution
if self.eventType == 'DandR':
self.showMsg('Timestamp validity check ...')
self.reportTimeValidity(D, R)
# self.calcNumBandApoints()
self.showMsg('================= 0.68 confidence interval report =================')
self.penumbralConfidenceIntervalReport(1, self.deltaDurhi68, self.deltaDurlo68,
self.deltaDhi68, self.deltaDlo68,
self.deltaRhi68, self.deltaRlo68)
self.showMsg('=============== end 0.68 confidence interval report ===============')
self.showMsg('================= 0.95 confidence interval report =================')
self.penumbralConfidenceIntervalReport(2, self.deltaDurhi95, self.deltaDurlo95,
self.deltaDhi95, self.deltaDlo95,
self.deltaRhi95, self.deltaRlo95)
self.showMsg('=============== end 0.95 confidence interval report ===============')
self.showMsg('================= 0.9973 confidence interval report ===============')
self.penumbralConfidenceIntervalReport(3, self.deltaDurhi99, self.deltaDurlo99,
self.deltaDhi99, self.deltaDlo99,
self.deltaRhi99, self.deltaRlo99)
self.showMsg('=============== end 0.9973 confidence interval report =============')
self.doDframeReport()
self.doRframeReport()
self.doDurFrameReport()
self.showMsg('=============== Summary report for Excel file =====================')
self.reportSpecialProcedureUsed() # This includes use of asteroid distance/speed and star diameter
if not self.timesAreValid:
self.showMsg("Times are invalid due to corrupted timestamps!",
color='red', bold=True)
self.showMsg(f'nominal magDrop: {self.magDropString(self.B, self.A)}')
self.xlsxDict['Comment'] = f'Nominal measured mag drop = {self.magDropString(self.B, self.A)}'
self.showMsg('snr: %0.2f' % self.snrB)
self.doDtimeReport()
self.doRtimeReport()
self.doDurTimeReport()
self.showMsg(
'Enter D and R error bars for each confidence interval in Excel spreadsheet without + or - sign (assumed to be +/-)')
self.showMsg('=========== end Summary report for Excel file =====================')
self.showMsg("Solution 'envelope' in the main plot drawn using 0.95 confidence interval error bars")
return
def finalReport(self, false_positive, false_probability):
self.xlsxDict = {}
self.writeDefaultGraphicsPlots()
# Grab the D and R values found and apply our timing convention
D, R = self.solution
if self.eventType == 'DandR':
self.showMsg('Timestamp validity check ...')
self.reportTimeValidity(D, R)
self.calcNumBandApoints()
self.showMsg('================= 0.68 confidence interval report =================')
self.confidenceIntervalReport(1, self.deltaDurhi68, self.deltaDurlo68,
self.deltaDhi68, self.deltaDlo68,
self.deltaRhi68, self.deltaRlo68)
self.showMsg('=============== end 0.68 confidence interval report ===============')
self.showMsg('================= 0.95 confidence interval report =================')
self.confidenceIntervalReport(2, self.deltaDurhi95, self.deltaDurlo95,
self.deltaDhi95, self.deltaDlo95,
self.deltaRhi95, self.deltaRlo95)
envelopePlusR = self.plusR
envelopePlusD = self.plusD
envelopeMinusR = self.minusR
envelopeMinusD = self.minusD
self.showMsg('=============== end 0.95 confidence interval report ===============')
self.showMsg('================= 0.9973 confidence interval report ===============')
self.confidenceIntervalReport(3, self.deltaDurhi99, self.deltaDurlo99,
self.deltaDhi99, self.deltaDlo99,
self.deltaRhi99, self.deltaRlo99)
self.showMsg('=============== end 0.9973 confidence interval report =============')
# Set the values to be used for the envelope plot (saved during 0.95 ci calculations)
self.plusR = envelopePlusR
self.plusD = envelopePlusD
self.minusR = envelopeMinusR
self.minusD = envelopeMinusD
self.doDframeReport()
self.doRframeReport()
self.doDurFrameReport()
self.showMsg('=============== Summary report for Excel file =====================')
self.reportSpecialProcedureUsed() # This includes use of asteroid distance/speed and star diameter
if false_positive:
self.showMsg(f"This 'drop' has a {false_probability:0.4f} probability of being an artifact of noise.",
bold=True, color='red', blankLine=False)
else:
self.showMsg(f"This 'drop' has a {false_probability:0.4f} probability of being an artifact of noise.",
bold=True, color='green', blankLine=False)
self.showMsg(f">>>> probability > 0.0000 indicates the 'drop' may be spurious (a noise artifact)."
f" Consult with an IOTA Regional Coordinator.", color='blue', blankLine=False)
self.showMsg(f">>>> probability = 0.0000 indicates the 'drop' is unlikely to be a noise artifact, but"
f" does not prove that the 'drop' is due to an occultation", color='blue', blankLine=False)
self.showMsg(f">>>> Consider 'drop' shape, timing, mag drop, duration and other positive observer"
f" chords before reporting the 'drop' as a positive.", color='blue')
self.showMsg("All timestamps are treated as being start-of-exposure times.",
color='red', bold=True)
self.showMsg("All times are calculated/reported based on the assumption that timestamps are "
"start-of-exposure times.",
color='blue', bold=True)
self.showMsg("It is critical that you make appropriate time adjustments when timestamps are "
"NOT start-of-exposure times.", color='red', bold=True, blankLine=False)
self.showMsg("If you use the North American Excel Spreadsheet report, all times will be properly corrected",
color='red', bold=True, blankLine=False)
self.showMsg("for camera delay and reported start-of-exposure times.",
color='red', bold=True, blankLine=False)
self.showMsg("For other users worldwide, use the appropriate corrections documented in the North American "
"Spreadsheet report form - use",
color='red', bold=True, blankLine=False)
self.showMsg("the documentation shown on the Corrections Tables tab.",
color='red', bold=True)
if not self.timesAreValid:
self.showMsg("Times are invalid due to corrupted timestamps!",
color='red', bold=True)
if self.choleskyFailed:
self.showMsg('Cholesky decomposition failed during error bar '
'calculations. '
'Noise has therefore been treated as being '
'uncorrelated.',
bold=True, color='red')
self.xlsxDict['Comment'] = f'Nominal measured mag drop = {self.magDropString(self.B, self.A)}'
self.showMsg(f'nominal magDrop: {self.magDropString(self.B, self.A)}')
self.showMsg('snr: %0.2f' % self.snrB)
self.doDtimeReport()
self.doRtimeReport()
self.doDurTimeReport()
self.showMsg('Enter D and R error bars for each confidence interval in Excel spreadsheet without + or - sign (assumed to be +/-)')
self.showMsg('=========== end Summary report for Excel file =====================')
self.showMsg("Solution 'envelope' in the main plot drawn using 0.95 confidence interval error bars")
self.showHelp(self.helpLabelForFalsePositive)
def doDframeReport(self):
if self.eventType == 'DandR' or self.eventType == 'Donly':
D, _ = self.solution
entryNum = int(D)
frameNum = float(self.yFrame[entryNum])
Dframe = (D - int(D)) * self.framesPerEntry() + frameNum
self.showMsg('D frame number: {0:0.2f}'.format(Dframe), blankLine=False)
errBar = max(abs(self.deltaDlo68), abs(self.deltaDhi68)) * self.framesPerEntry()
self.showMsg('D: 0.6800 confidence intervals: {{+/- {0:0.2f}}} (readings)'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaDlo95), abs(self.deltaDhi95)) * self.framesPerEntry()
self.showMsg('D: 0.9500 confidence intervals: {{+/- {0:0.2f}}} (readings)'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaDlo99), abs(self.deltaDhi99)) * self.framesPerEntry()
self.showMsg('D: 0.9973 confidence intervals: {{+/- {0:0.2f}}} (readings)'.format(errBar))
def framesPerEntry(self):
# Normally, there is 1 frame per reading (entry), but if the source file was recorded
# in field mode, there is only 0.5 frames per reading (entry). Here we make the correction.
if self.fieldMode:
return self.blockSize / 2
else:
return self.blockSize
def doRframeReport(self):
if self.eventType == 'DandR' or self.eventType == 'Ronly':
_, R = self.solution
entryNum = int(R)
frameNum = float(self.yFrame[entryNum])
Rframe = (R - int(R)) * self.framesPerEntry() + frameNum
self.showMsg('R frame number: {0:0.2f}'.format(Rframe), blankLine=False)
errBar = max(abs(self.deltaRlo68), abs(self.deltaRhi68)) * self.framesPerEntry()
self.showMsg('R: 0.6800 confidence intervals: {{+/- {0:0.2f}}} (readings)'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaRlo95), abs(self.deltaRhi95)) * self.framesPerEntry()
self.showMsg('R: 0.9500 confidence intervals: {{+/- {0:0.2f}}} (readings)'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaRlo99), abs(self.deltaRhi99)) * self.framesPerEntry()
self.showMsg('R: 0.9973 confidence intervals: {{+/- {0:0.2f}}} (readings)'.format(errBar))
def doDurFrameReport(self):
if self.eventType == 'DandR':
D, R = self.solution
self.showMsg('Duration (R - D): {0:0.4f} readings'.format((R - D) * self.framesPerEntry()), blankLine=False)
errBar = ((self.deltaDurhi68 - self.deltaDurlo68) / 2) * self.framesPerEntry()
self.showMsg('Duration: 0.6800 confidence intervals: {{+/- {0:0.4f}}} (readings)'.format(errBar),
blankLine=False)
errBar = ((self.deltaDurhi95 - self.deltaDurlo95) / 2) * self.framesPerEntry()
self.showMsg('Duration: 0.9500 confidence intervals: {{+/- {0:0.4f}}} (readings)'.format(errBar),
blankLine=False)
errBar = ((self.deltaDurhi99 - self.deltaDurlo99) / 2) * self.framesPerEntry()
self.showMsg('Duration: 0.9973 confidence intervals: {{+/- {0:0.4f}}} (readings)'.format(errBar))
def doDtimeReport(self):
if self.eventType == 'DandR' or self.eventType == 'Donly':
D, _ = self.solution
ts = self.yTimes[int(D)]
time = convertTimeStringToTime(ts)
adjTime = time + (D - int(D)) * self.timeDelta
self.Dtime = adjTime # This is needed for the duration report (assumed to follow!!!)
ts = convertTimeToTimeString(adjTime)
tsParts = ts[1:-1].split(':')
self.xlsxDict['Dhour'] = tsParts[0]
self.xlsxDict['Dmin'] = tsParts[1]
self.xlsxDict['Dsec'] = tsParts[2]
self.showMsg('D time: %s' % ts, blankLine=False)
errBar = max(abs(self.deltaDlo68), abs(self.deltaDhi68)) * self.timeDelta
self.xlsxDict['Derr68'] = errBar
self.showMsg('D: 0.6800 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaDlo95), abs(self.deltaDhi95)) * self.timeDelta
self.xlsxDict['Derr95'] = errBar
self.showMsg('D: 0.9500 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaDlo99), abs(self.deltaDhi99)) * self.timeDelta
self.xlsxDict['Derr99'] = errBar
self.showMsg('D: 0.9973 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar))
def doRtimeReport(self):
if self.eventType == 'DandR' or self.eventType == 'Ronly':
_, R = self.solution
ts = self.yTimes[int(R)]
time = convertTimeStringToTime(ts)
adjTime = time + (R - int(R)) * self.timeDelta
self.Rtime = adjTime # This is needed for the duration report (assumed to follow!!!)
ts = convertTimeToTimeString(adjTime)
tsParts = ts[1:-1].split(':')
self.xlsxDict['Rhour'] = tsParts[0]
self.xlsxDict['Rmin'] = tsParts[1]
self.xlsxDict['Rsec'] = tsParts[2]
self.showMsg('R time: %s' % ts, blankLine=False)
errBar = max(abs(self.deltaRlo68), abs(self.deltaRhi68)) * self.timeDelta
self.xlsxDict['Rerr68'] = errBar
self.showMsg('R: 0.6800 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaRlo95), abs(self.deltaRhi95)) * self.timeDelta
self.xlsxDict['Rerr95'] = errBar
self.showMsg('R: 0.9500 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar), blankLine=False)
errBar = max(abs(self.deltaRlo99), abs(self.deltaRhi99)) * self.timeDelta
self.xlsxDict['Rerr99'] = errBar
self.showMsg('R: 0.9973 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar))
def doDurTimeReport(self):
if self.eventType == 'DandR':
dur = self.Rtime - self.Dtime
if dur < 0: # We have bracketed midnight
dur = dur + 3600 * 24 # Add seconds in a day
self.showMsg('Duration (R - D): {0:0.4f} seconds'.format(dur), blankLine=False)
errBar = ((self.deltaDurhi68 - self.deltaDurlo68) / 2) * self.timeDelta
self.showMsg('Duration: 0.6800 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar), blankLine=False)
errBar = ((self.deltaDurhi95 - self.deltaDurlo95) / 2) * self.timeDelta
self.showMsg('Duration: 0.9500 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar), blankLine=False)
errBar = ((self.deltaDurhi99 - self.deltaDurlo99) / 2) * self.timeDelta
self.showMsg('Duration: 0.9973 confidence intervals: {{+/- {0:0.4f}}} seconds'.format(errBar))
def reportTimeValidity(self, D, R):
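        """Sanity-check the timestamps at the D and R transition points.

        The number of readings implied by the timestamp difference
        (rTime - dTime) / timeDelta is compared with the number implied by the
        reading indices (intR - intD); a mismatch flags corrupted timestamps or
        dropped frames. A transition through midnight is handled by adding 24
        hours to rTime.
        """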
intD = int(D)
intR = int(R)
dTime = convertTimeStringToTime(self.yTimes[intD])
rTime = convertTimeStringToTime(self.yTimes[intR])
# Here we check for a 'midnight transition'
if rTime < dTime:
rTime += 24 * 60 * 60
self.showMsg('D and R enclose a transition through midnight')
if self.timeDelta == 0:
self.timesAreValid = False
self.showMsg('Timestamps are corrupted in a manner that caused a '
'timeDelta of '
'0.0 to be estimated!', color='red', bold=True)
self.showInfo('Timestamps are corrupted in a manner that caused a '
'timeDelta of '
'0.0 to be estimated!')
return
numEnclosedReadings = int(round((rTime - dTime) / self.timeDelta))
        self.showMsg('From the D and R timestamps, calculated %d enclosed reading blocks. '
                     'From the D and R reading block numbers, calculated %d blocks.' %
                     (numEnclosedReadings, intR - intD))
if numEnclosedReadings == intR - intD:
self.showMsg('Timestamps appear valid @ D and R')
self.timesAreValid = True
else:
self.timesAreValid = False
self.showMsg('! There is something wrong with timestamps at D '
'and/or R or frames have been dropped !', bold=True,
color='red')
def computeErrorBars(self):
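        """Build error-bar distributions and produce the final report.

        Unless a penumbral fit is in effect, this runs a Monte Carlo simulation
        (100,000 trials via edgeDistributionGenerator) of edge placement in
        noise with the measured sigmaB/sigmaA and positive autocorrelation
        coefficients, extracts 0.68 / 0.95 / 0.9973 confidence limits for D, R
        and duration, plots the distributions, runs the false-positive test,
        and, when timestamps are present, calls finalReport().
        """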
if self.penumbralFitCheckBox.isChecked():
self.finalReportPenumbral()
return
if self.sigmaB == 0.0:
self.sigmaB = MIN_SIGMA
if self.sigmaA == 0.0:
self.sigmaA = MIN_SIGMA
self.snrB = (self.B - self.A) / self.sigmaB
self.snrA = (self.B - self.A) / self.sigmaA
snr = max(self.snrB, 0.2) # A more reliable number
D = int(round(80 / snr**2 + 0.5))
D = max(10, D)
if self.corCoefs.size > 1:
D = round(1.5 * D)
numPts = 2 * (D - 1) + 1
posCoefs = []
for entry in self.corCoefs:
if entry < acfCoefThreshold:
break
posCoefs.append(entry)
distGen = edgeDistributionGenerator(
ntrials=100000, numPts=numPts, D=D, acfcoeffs=posCoefs,
B=self.B, A=self.A, sigmaB=self.sigmaB, sigmaA=self.sigmaA)
dist = None
self.choleskyFailed = False
for dist in distGen:
if type(dist) == float:
if dist == -1.0:
self.choleskyFailed = True
self.showInfo(
'The Cholesky-Decomposition routine has failed. This may be because the light curve ' +
'required some level of block integration. Please '
'examine the light curve for that possibility.' +
'\nWe treat this situation as though there is no '
'correlation in the noise.')
self.showMsg('Cholesky decomposition has failed. '
'Proceeding by '
'treating noise as being uncorrelated.',
bold=True, color='red')
self.progressBar.setValue(int(dist * 100))
QtGui.QApplication.processEvents()
if self.cancelRequested:
self.cancelRequested = False
self.showMsg('Error bar calculation was cancelled')
self.progressBar.setValue(0)
return
else:
# self.calcErrBars.setEnabled(False)
self.progressBar.setValue(0)
y, x = np.histogram(dist, bins=1000)
self.loDbar95, _, self.hiDbar95, self.deltaDlo95, self.deltaDhi95 = ciBars(dist=dist, ci=0.95)
self.loDbar99, _, self.hiDbar99, self.deltaDlo99, self.deltaDhi99 = ciBars(dist=dist, ci=0.9973)
self.loDbar68, _, self.hiDbar68, self.deltaDlo68, self.deltaDhi68 = ciBars(dist=dist, ci=0.6827)
self.deltaRlo95 = - self.deltaDhi95
self.deltaRhi95 = - self.deltaDlo95
self.deltaRlo99 = - self.deltaDhi99
self.deltaRhi99 = - self.deltaDlo99
self.deltaRlo68 = - self.deltaDhi68
self.deltaRhi68 = - self.deltaDlo68
if isinstance(dist, np.ndarray):
durDist = createDurDistribution(dist)
else:
self.showInfo('Unexpected error: variable dist is not of type np.ndarray')
return
ydur, xdur = np.histogram(durDist, bins=1000)
self.loDurbar95, _, self.hiDurbar95, self.deltaDurlo95, self.deltaDurhi95 = ciBars(dist=durDist, ci=0.95)
self.loDurbar99, _, self.hiDurbar99, self.deltaDurlo99, self.deltaDurhi99 = ciBars(dist=durDist, ci=0.9973)
self.loDurbar68, _, self.hiDurbar68, self.deltaDurlo68, self.deltaDurhi68 = ciBars(dist=durDist, ci=0.6827)
pg.setConfigOptions(antialias=True)
pen = pg.mkPen((0, 0, 0), width=2)
# Get rid of a previous errBarWin that may have been closed (but not properly disposed of) by the user.
if self.errBarWin is not None:
self.errBarWin.close()
self.errBarWin = pg.GraphicsWindow(
title='Solution distributions with confidence intervals marked --- false positive distribution')
self.errBarWin.resize(1200, 1000)
layout = QtGui.QGridLayout()
self.errBarWin.setLayout(layout)
pw = PlotWidget(viewBox=CustomViewBox(border=(0, 0, 0)),
enableMenu=False, title='Distribution of edge (D) errors due to noise',
labels={'bottom': 'Reading blocks'})
self.dBarPlotItem = pw.getPlotItem()
pw.hideButtons()
pw2 = PlotWidget(viewBox=CustomViewBox(border=(0, 0, 0)),
enableMenu=False, title='Distribution of duration (R - D) errors due to noise',
labels={'bottom': 'Reading blocks'})
self.durBarPlotItem = pw2.getPlotItem()
pw2.hideButtons()
pw3, false_positive, false_probability = self.doFalsePositiveReport(posCoefs)
self.falsePositivePlotItem = pw3.getPlotItem()
layout.addWidget(pw, 0, 0)
layout.addWidget(pw2, 0, 1)
layout.addWidget(pw3, 1, 0, 1, 2) # (pw3, row_start, col_start, n_rows_to_span, n_cols_to_span)
pw.plot(x-D, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
pw.addLine(y=0, z=-10, pen=[0, 0, 255])
pw.addLine(x=0, z=+10, pen=[255, 0, 0])
yp = max(y) * 0.75
x1 = self.loDbar68-D
pw.plot(x=[x1, x1], y=[0, yp], pen=pen)
x2 = self.hiDbar68-D
pw.plot(x=[x2, x2], y=[0, yp], pen=pen)
pw.addLegend()
legend68 = '[%0.2f,%0.2f] @ 0.6827' % (x1, x2)
pw.plot(name=legend68)
self.showMsg("Error bar report based on 100,000 simulations (units are readings)...")
self.showMsg('loDbar @ .68 ci: %8.4f' % (x1 * self.framesPerEntry()), blankLine=False)
self.showMsg('hiDbar @ .68 ci: %8.4f' % (x2 * self.framesPerEntry()), blankLine=False)
yp = max(y) * 0.25
x1 = self.loDbar95-D
pw.plot(x=[x1, x1], y=[0, yp], pen=pen)
x2 = self.hiDbar95-D
pw.plot(x=[x2, x2], y=[0, yp], pen=pen)
self.showMsg('loDbar @ .95 ci: %8.4f' % (x1 * self.framesPerEntry()), blankLine=False)
self.showMsg('hiDbar @ .95 ci: %8.4f' % (x2 * self.framesPerEntry()), blankLine=False)
legend95 = '[%0.2f,%0.2f] @ 0.95' % (x1, x2)
pw.plot(name=legend95)
yp = max(y) * 0.15
x1 = self.loDbar99 - D
pw.plot(x=[x1, x1], y=[0, yp], pen=pen)
x2 = self.hiDbar99 - D
pw.plot(x=[x2, x2], y=[0, yp], pen=pen)
self.showMsg('loDbar @ .9973 ci: %8.4f' % (x1 * self.framesPerEntry()), blankLine=False)
self.showMsg('hiDbar @ .9973 ci: %8.4f' % (x2 * self.framesPerEntry()), blankLine=True)
legend99 = '[%0.2f,%0.2f] @ 0.9973' % (x1, x2)
pw.plot(name=legend99)
pw.hideAxis('left')
pw2.plot(xdur, ydur, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
pw2.addLine(y=0, z=-10, pen=[0, 0, 255])
pw2.addLine(x=0, z=+10, pen=[255, 0, 0])
yp = max(ydur) * 0.75
x1 = self.loDurbar68
pw2.plot(x=[x1, x1], y=[0, yp], pen=pen)
x2 = self.hiDurbar68
pw2.plot(x=[x2, x2], y=[0, yp], pen=pen)
pw2.addLegend()
legend68 = '[%0.2f,%0.2f] @ 0.6827' % (x1, x2)
pw2.plot(name=legend68)
self.showMsg('loDurBar @ .68 ci: %8.4f' % (x1 * self.framesPerEntry()), blankLine=False)
self.showMsg('hiDurBar @ .68 ci: %8.4f' % (x2 * self.framesPerEntry()), blankLine=False)
yp = max(ydur) * 0.25
x1 = self.loDurbar95
pw2.plot(x=[x1, x1], y=[0, yp], pen=pen)
x2 = self.hiDurbar95
pw2.plot(x=[x2, x2], y=[0, yp], pen=pen)
self.showMsg('loDurBar @ .95 ci: %8.4f' % (x1 * self.framesPerEntry()), blankLine=False)
self.showMsg('hiDurBar @ .95 ci: %8.4f' % (x2 * self.framesPerEntry()), blankLine=False)
legend95 = '[%0.2f,%0.2f] @ 0.95' % (x1, x2)
pw2.plot(name=legend95)
yp = max(ydur) * 0.15
x1 = self.loDurbar99
pw2.plot(x=[x1, x1], y=[0, yp], pen=pen)
x2 = self.hiDurbar99
pw2.plot(x=[x2, x2], y=[0, yp], pen=pen)
self.showMsg('loDurBar @ .9973 ci: %8.4f' % (x1 * self.framesPerEntry()), blankLine=False)
self.showMsg('hiDurBar @ .9973 ci: %8.4f' % (x2 * self.framesPerEntry()), blankLine=True)
legend99 = '[%0.2f,%0.2f] @ 0.9973' % (x1, x2)
pw2.plot(name=legend99)
pw2.hideAxis('left')
self.writeBarPlots.setEnabled(True)
if self.timestampListIsEmpty(self.yTimes):
self.showMsg('Cannot produce final report because timestamps are missing.', bold=True, color='red')
else:
self.finalReport(false_positive, false_probability)
self.fillExcelReportButton.setEnabled(True)
self.reDrawMainPlot() # To add envelope to solution
def doFalsePositiveReport(self, posCoefs):
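        """Estimate the probability that the observed drop is a noise artifact.

        compute_drops() builds a distribution of drops that pure correlated
        noise (measured sigma and autocorrelation coefficients) can produce for
        this event duration and observation size (num_trials = 50,000). The
        observed drop (B - A) is then ranked against the sorted distribution to
        give a false-positive probability (0.0 when it exceeds every simulated
        drop). Returns the histogram plot widget, a boolean flag, and the
        probability.
        """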
d, r = self.solution
if self.eventType == 'Donly':
event_duration = self.right - int(np.trunc(d))
elif self.eventType == 'Ronly':
event_duration = int(np.ceil(r)) - self.left
else:
event_duration = int(np.ceil(r - d))
observation_size = self.right - self.left + 1
sigma = max(self.sigmaA, self.sigmaB)
observed_drop = self.B - self.A
num_trials = 50_000
drops = compute_drops(event_duration=event_duration, observation_size=observation_size,
noise_sigma=sigma, corr_array=np.array(posCoefs), num_trials=num_trials)
pw = PlotWidget(viewBox=CustomViewBox(border=(0, 0, 0)),
enableMenu=False,
title=f'Distribution of drops found in correlated noise for event duration: {event_duration}',
labels={'bottom': 'drop size', 'left': 'number of times noise produced drop'})
pw.hideButtons()
y, x = np.histogram(drops, bins=50)
pw.plot(x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
pw.plot(x=[observed_drop, observed_drop], y=[0, 1.5 * np.max(y)], pen=pg.mkPen([255, 0, 0], width=6))
pw.plot(x=[np.max(x), np.max(x)], y=[0, 0.25 * np.max(y)], pen=pg.mkPen([0, 0, 0], width=6))
pw.addLegend()
pw.plot(name='red line: the drop (B - A) extracted from lightcurve')
pw.plot(name=f'black line: max drop found in {num_trials} trials against pure noise')
pw.plot(name='If the red line is to the right of the black line, false positive prob = 0')
sorted_drops = np.sort(drops)
index_of_observed_drop_inside_sorted_drops = None
for i, value in enumerate(sorted_drops):
if value >= observed_drop:
index_of_observed_drop_inside_sorted_drops = i
break
if index_of_observed_drop_inside_sorted_drops is None:
false_probability = 0.0
false_positive = False
else:
false_probability = 1.0 - index_of_observed_drop_inside_sorted_drops / drops.size
false_positive = True
return pw, false_positive, false_probability
def displaySolution(self, subframe=True):
D, R = self.solution
# D and R are floats and may be fractional because of sub-frame timing.
        # We have to remove the effects of sub-frame timing to calculate the D
# and R transition points as integers.
solMsg2 = ''
frameConv = float(self.yFrame[0])
DinFrameUnits = None
RinFrameUnits = None
if D and R:
Dtransition = trunc(floor(self.solution[0]))
Rtransition = trunc(floor(self.solution[1]))
DinFrameUnits = Dtransition * self.framesPerEntry() + frameConv
RinFrameUnits = Rtransition * self.framesPerEntry() + frameConv
if subframe:
solMsg = ('D: %d R: %d D(subframe): %0.4f R(subframe): %0.4f' %
(Dtransition, Rtransition, D, R))
solMsg2 = ('D: %d R: %d D(subframe): %0.4f R(subframe): '
'%0.4f' %
(DinFrameUnits,
RinFrameUnits,
D * self.framesPerEntry() + frameConv, R * self.framesPerEntry() + frameConv))
else:
solMsg = ('D: %d R: %d' % (D, R))
self.showMsg('in entryNum units: ' + solMsg)
if solMsg2:
self.showMsg('in frameNum units: ' + solMsg2, bold=True)
elif D:
Dtransition = trunc(floor(self.solution[0]))
DinFrameUnits = Dtransition * self.framesPerEntry() + frameConv
if subframe:
solMsg = ('D: %d D(subframe): %0.4f' % (Dtransition, D))
solMsg2 = ('D: %d D(subframe): %0.4f' %
(DinFrameUnits, D * self.framesPerEntry() + frameConv))
else:
solMsg = ('D: %d' % D)
self.showMsg('in entryNum units: ' + solMsg)
if solMsg2:
self.showMsg('in frameNum units: ' + solMsg2, bold=True)
else:
Rtransition = trunc(floor(self.solution[1]))
RinFrameUnits = Rtransition * self.framesPerEntry() + frameConv
if subframe:
solMsg = ('R: %d R(subframe): %0.4f' % (Rtransition, R))
solMsg2 = ('R: %d R(subframe): %0.4f' %
(RinFrameUnits, R * self.framesPerEntry() + frameConv))
else:
solMsg = ('R: %d' % R)
self.showMsg('in entryNum units: ' + solMsg)
if solMsg2:
self.showMsg('in frameNum units: ' + solMsg2, bold=True)
# This function is called twice: once without a subframe calculation and then again with
# subframe calculations enabled. We only want to display D and/or R frames at the end
# of the second pass
if subframe:
if self.showOCRcheckFramesCheckBox.isChecked():
if self.pathToVideo:
if DinFrameUnits:
self.showAnnotatedFrame(int(DinFrameUnits), "D edge:")
if RinFrameUnits:
self.showAnnotatedFrame(int(RinFrameUnits), 'R edge:')
return True
return False
def update_noise_parameters_from_solution(self):
D, R = self.solution
# D and R are floats and may be fractional because of sub-frame timing.
        # Here we remove the effects of sub-frame timing to calculate the D
        # and R transition points as integers.
if D:
D = trunc(floor(D))
if R:
R = trunc(floor(R))
self.showMsg('Recalculating noise parameters to include all points '
'based on first pass solution ====',
color='red', bold=True)
if D and R:
self.sigmaA = None
self.corCoefs = []
self.selectedPoints = {}
self.togglePointSelected(self.left)
self.togglePointSelected(D-1)
self.processBaselineNoise(secondPass=True)
self.selectedPoints = {}
self.togglePointSelected(R+1)
self.togglePointSelected(self.right)
self.processBaselineNoise(secondPass=True)
self.selectedPoints = {}
self.togglePointSelected(D+1)
self.togglePointSelected(R-1)
self.processEventNoise(secondPass=True)
elif D:
self.sigmaA = None
self.corCoefs = []
self.selectedPoints = {}
self.togglePointSelected(self.left)
self.togglePointSelected(D - 1)
self.processBaselineNoise(secondPass=True)
self.selectedPoints = {}
self.togglePointSelected(D + 1)
self.togglePointSelected(self.right)
self.processEventNoise(secondPass=True)
else:
self.sigmaA = None
self.corCoefs = []
self.selectedPoints = {}
self.togglePointSelected(R + 1)
self.togglePointSelected(self.right)
self.processBaselineNoise(secondPass=True)
self.selectedPoints = {}
self.togglePointSelected(self.left)
self.togglePointSelected(R - 1)
self.processEventNoise(secondPass=True)
return
def extract_noise_parameters_from_iterative_solution(self):
D, R = self.solution
# D and R are floats and may be fractional because of sub-frame timing.
        # Here we remove the effects of sub-frame timing to calculate the D
        # and R transition points as integers.
if D:
D = trunc(floor(D))
if R:
R = trunc(floor(R))
if D and R:
self.sigmaA = None
self.corCoefs = []
self.processBaselineNoiseFromIterativeSolution(self.left, D - 1)
self.processBaselineNoiseFromIterativeSolution(R + 1, self.right)
self.processEventNoiseFromIterativeSolution(D + 1, R - 1)
# Try to warn user about the possible need for block integration by testing the lag 1
# and lag 2 correlation coefficients. The tests are just guesses on my part, so only
# warnings are given. Later, the Cholesky-Decomposition may fail because block integration
# was really needed. That is a fatal error but is trapped and the user alerted to the problem
if len(self.corCoefs) > 1:
if self.corCoefs[1] >= 0.7:
self.showInfo(
'The auto-correlation coefficient at lag 1 is suspiciously large. '
'This may be because the light curve needs some degree of block integration. '
'Failure to do a needed block integration allows point-to-point correlations caused by '
'the camera integration to artificially induce non-physical correlated noise.')
elif len(self.corCoefs) > 2:
if self.corCoefs[2] >= 0.3:
self.showInfo(
'The auto-correlation coefficient at lag 2 is suspiciously large. '
'This may be because the light curve needs some degree of block integration. '
'Failure to do a needed block integration allows point-to-point correlations caused by '
'the camera integration to artificially induce non-physical correlated noise.')
if self.sigmaA is None:
self.sigmaA = self.sigmaB
elif D:
self.sigmaA = None
self.corCoefs = []
self.processBaselineNoiseFromIterativeSolution(self.left, D - 1)
self.processEventNoiseFromIterativeSolution(D + 1, self.right)
if self.sigmaA is None:
self.sigmaA = self.sigmaB
else: # R only
self.sigmaA = None
self.corCoefs = []
self.processBaselineNoiseFromIterativeSolution(R + 1, self.right)
self.processEventNoiseFromIterativeSolution(self.left, R - 1)
if self.sigmaA is None:
self.sigmaA = self.sigmaB
self.prettyPrintCorCoefs()
return
def try_to_get_solution(self):
self.solution = None
self.reDrawMainPlot()
solverGen = solver(
eventType=self.eventType, yValues=self.yValues,
left=self.left, right=self.right,
sigmaB=self.sigmaB, sigmaA=self.sigmaA,
dLimits=self.dLimits, rLimits=self.rLimits,
minSize=self.minEvent, maxSize=self.maxEvent)
self.cancelRequested = False
for item in solverGen:
if item[0] == 'fractionDone':
pass
# Here we should update progress bar and check for cancellation
# self.progressBar.setValue(item[1] * 100)
# QtGui.QApplication.processEvents()
# if self.cancelRequested:
# self.cancelRequested = False
# self.runSolver = False
# self.showMsg('Solution search was cancelled')
# self.progressBar.setValue(0)
# break
elif item[0] == 'no event present':
self.showMsg('No event fitting search criteria could be found.')
# self.progressBar.setValue(0)
self.runSolver = False
break
else:
# self.progressBar.setValue(0)
self.solution = item[0]
self.B = item[1]
self.A = item[2]
def compareFirstAndSecondPassResults(self):
D1, R1 = self.firstPassSolution
D2, R2 = self.secondPassSolution
if D1:
D1 = trunc(floor(D1))
if D2:
D2 = trunc(floor(D2))
if R1:
R1 = trunc(floor(R1))
if R2:
R2 = trunc(floor(R2))
if D1 == D2 and R1 == R2:
return
# There is a difference in the D and/or R transition points identified
# in the first and second passes --- alert the user.
self.showInfo('The D and/or R transition points identified in pass 1 '
'are different from those found in pass 2 (after '
'automatic noise analysis). '
'It is recommended that you '
'rerun the light curve using the D and R values found in '
'this second pass to more accurately select points for '
'the initial noise analysis.')
def extractBaselineAndEventData(self):
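        """Return mean, std and point count for the baseline and event regions.

        The regions are derived from the user-marked D and/or R limits: with
        both limits present the baseline is the union of points left of D and
        right of R; with only one limit the remaining points form the event
        region. Returns (B, Bnoise, numBpts, A, Anoise, numApts), or six Nones
        if no D or R region has been marked.
        """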
if self.dLimits and self.rLimits:
left_baseline_pts = self.yValues[self.left:self.dLimits[0]]
right_baseline_pts = self.yValues[self.rLimits[1]+1:self.right + 1]
baseline_pts = np.concatenate((left_baseline_pts, right_baseline_pts))
event_pts = self.yValues[self.dLimits[1]+1:self.rLimits[0]]
elif self.dLimits:
baseline_pts = self.yValues[self.left:self.dLimits[0]]
event_pts = self.yValues[self.dLimits[1] + 1:self.right + 1]
elif self.rLimits:
baseline_pts = self.yValues[self.rLimits[1] + 1:self.right + 1]
event_pts = self.yValues[self.left:self.rLimits[0]]
else:
self.showInfo(f'No D or R region has been marked!')
return None, None, None, None, None, None
B = np.mean(baseline_pts)
Bnoise = np.std(baseline_pts)
numBpts = len(baseline_pts)
A = np.mean(event_pts)
Anoise = np.std(event_pts)
numApts = len(event_pts)
return B, Bnoise, numBpts, A, Anoise, numApts
def doPenumbralFit(self):
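        """Perform one iteration of the penumbral (gradual) lightcurve fit.

        Candidate transition points are collected from the marked D and/or R
        regions (readings whose intensities fall between the A and B levels),
        each candidate is converted to a sub-reading edge estimate via the
        modelled underlying lightcurve, and the means become the D/R solution.
        A fit metric is reported so the user can judge whether successive
        iterations improve.
        """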
if self.firstPassPenumbralFit:
self.firstPassPenumbralFit = False
self.lastDmetric = self.lastRmetric = 0.0
self.penumbralFitIterationNumber = 1
b_intensity, b_noise, num_b_pts, a_intensity, a_noise, num_a_pts = self.extractBaselineAndEventData()
if b_intensity is None:
return # An info message will have already been raised. No need to do anything else.
# Get current underlying lightcurve
self.underlyingLightcurveAns = self.demoUnderlyingLightcurves(baseline=b_intensity, event=a_intensity,
plots_wanted=False)
# Adjust b_intensity and a_intensity to match the underlying lightcurve table
b_intensity = self.underlyingLightcurveAns['B']
a_intensity = self.underlyingLightcurveAns['A']
self.showMsg(f'B: {b_intensity:0.2f} A: {a_intensity:0.2f} B noise: {b_noise:0.3f} A noise: {a_noise:0.3f}')
d_candidates = []
r_candidates = []
d_candidate_entry_nums = []
r_candidate_entry_nums = []
if self.dLimits:
self.eventType = 'Donly'
d_region_intensities = self.yValues[self.dLimits[0]:self.dLimits[1] + 1]
d_region_entry_nums = range(self.dLimits[0], self.dLimits[1] + 1)
middle = len(d_region_intensities) // 2
i = middle
while d_region_intensities[i] > a_intensity:
d_candidates.append(d_region_intensities[i])
d_candidate_entry_nums.append(d_region_entry_nums[i])
i += 1
if i == len(d_region_intensities):
break
i = middle - 1
while d_region_intensities[i] < b_intensity:
d_candidates.append(d_region_intensities[i])
d_candidate_entry_nums.append(d_region_entry_nums[i])
i -= 1
if i < 0:
break
if not d_candidates:
self.showMsg('No valid transition points found in designated D region.', bold=True, color='red')
return
# Sort the parallel lists into ascending entry number order
zipped_lists = zip(d_candidate_entry_nums, d_candidates)
sorted_pairs = sorted(zipped_lists)
tuples = zip(*sorted_pairs)
d_candidate_entry_nums, d_candidates = [list(item) for item in tuples]
print("d_candidates", d_candidates)
print("D entry nums", d_candidate_entry_nums)
if self.rLimits:
self.eventType = 'Ronly'
r_region_intensities = self.yValues[self.rLimits[0]:self.rLimits[1] + 1]
r_region_entry_nums = range(self.rLimits[0], self.rLimits[1] + 1)
middle = len(r_region_intensities) // 2
i = middle
while r_region_intensities[i] < b_intensity:
r_candidates.append(r_region_intensities[i])
r_candidate_entry_nums.append(r_region_entry_nums[i])
i += 1
if i == len(r_region_intensities):
break
i = middle - 1
while r_region_intensities[i] > a_intensity:
r_candidates.append(r_region_intensities[i])
r_candidate_entry_nums.append(r_region_entry_nums[i])
i -= 1
if i < 0:
break
if not r_candidates:
self.showMsg('No valid transition points found in designated R region.', bold=True, color='red')
# Sort the parallel lists into ascending entry number order
zipped_lists = zip(r_candidate_entry_nums, r_candidates)
sorted_pairs = sorted(zipped_lists)
tuples = zip(*sorted_pairs)
r_candidate_entry_nums, r_candidates = [list(item) for item in tuples]
print("r_candidates", r_candidates)
print("R entry nums", r_candidate_entry_nums)
if self.dLimits and self.rLimits:
self.eventType = 'DandR'
self.dRegion = None # Erases the coloring of the D region (when self.reDrawMainPlot is called)
self.rRegion = None # Erases the coloring of the R region (when self.reDrawMainPlot is called)
# Preserve these for possible next pass
self.d_candidates = d_candidates
self.d_candidate_entry_nums = d_candidate_entry_nums
self.r_candidates = r_candidates
self.r_candidate_entry_nums = r_candidate_entry_nums
self.penumbral_noise = (b_noise + a_noise) / 2.0
self.A = a_intensity
self.B = b_intensity
self.nBpts = num_b_pts
self.nApts = num_a_pts
self.sigmaA = a_noise
self.sigmaB = b_noise
self.snrA = (self.B - self.A) / self.sigmaA
self.snrB = (self.B - self.A) / self.sigmaB
# Get current underlying lightcurve
self.underlyingLightcurveAns = self.demoUnderlyingLightcurves(baseline=self.B, event=self.A,
plots_wanted=False)
# If an error in data entry has occurred, ans will be None
if self.underlyingLightcurveAns is None:
self.showMsg(f'An error in the underlying lightcurve parameters has occurred.', bold=True, color='red')
return
if len(self.d_candidates) > 0 and len(self.r_candidates) > 0:
self.eventType = 'DandR'
elif len(self.d_candidates) > 0:
self.eventType = 'Donly'
else:
self.eventType = 'Ronly'
if self.eventType in ['Donly', 'DandR']:
d_list = []
for i in range(len(self.d_candidates)):
newD = self.dEdgeCorrected(self.d_candidates[i], self.d_candidate_entry_nums[i])
if newD is not None:
d_list.append(newD)
else:
print('newD came back as None')
d_mean = np.mean(d_list)
# print(d_list, d_mean)
else:
d_mean = None
if self.eventType in ['Ronly', 'DandR']:
r_list = []
for i in range(len(self.r_candidates)):
newR = self.rEdgeCorrected(self.r_candidates[i], self.r_candidate_entry_nums[i])
if newR is not None:
r_list.append(newR)
else:
print('newR came back as None')
r_mean = np.mean(r_list)
# print(r_list, r_mean)
else:
r_mean = None
self.solution = [None, None] # Convert from tuple so that next line will be accepted
self.solution[0] = d_mean
self.solution[1] = r_mean
d_time_err_bar = r_time_err_bar = 0.0
if self.eventType in ['Donly', 'DandR']:
d_noise = self.penumbral_noise / np.sqrt(len(self.d_candidates))
mid_intensity = (self.B + self.A) / 2.0
d_time1 = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=mid_intensity - 2 * d_noise, edge_type='D')
d_time2 = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=mid_intensity + 2 * d_noise, edge_type='D')
d_time_err_bar = abs(d_time1 - d_time2) / 2.0
if self.eventType in ['Ronly', 'DandR']:
r_noise = self.penumbral_noise / np.sqrt(len(self.r_candidates))
mid_intensity = (self.B + self.A) / 2.0
r_time1 = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=mid_intensity - 2 * r_noise, edge_type='R')
r_time2 = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=mid_intensity + 2 * r_noise, edge_type='R')
r_time_err_bar = abs(r_time1 - r_time2) / 2.0
self.minusD = self.plusD = None
self.minusR = self.plusR = None
self.penumbralDerrBar = d_time_err_bar
self.penumbralRerrBar = r_time_err_bar
self.doPenumbralFitIterationReport()
if self.eventType in ['Donly', 'DandR']:
self.Dreport(d_time_err_bar, -d_time_err_bar)
if self.eventType in ['Ronly', 'DandR']:
self.Rreport(r_time_err_bar, -r_time_err_bar)
self.reDrawMainPlot()
self.drawSolution()
self.calcErrBars.setEnabled(True)
d_improved_msg = 'starting value'
r_improved_msg = 'starting value'
d_metric, r_metric = self.calculatePenumbralMetrics(d_mean, r_mean)
if self.eventType in ['Donly', 'DandR']:
if self.penumbralFitIterationNumber > 1:
if d_metric < self.lastDmetric:
d_improved_msg = 'improved'
elif d_metric > self.lastDmetric:
d_improved_msg = 'got worse'
else:
d_improved_msg = 'unchanged'
self.showMsg(f'D fit metric: {d_metric:0.1f} ({d_improved_msg})', bold=True, blankLine=False)
if self.eventType in ['Ronly', 'DandR']:
if self.penumbralFitIterationNumber > 1:
if r_metric < self.lastRmetric:
r_improved_msg = 'improved'
elif r_metric > self.lastRmetric:
r_improved_msg = 'got worse'
else:
r_improved_msg = 'unchanged'
self.showMsg(f'R fit metric: {r_metric:0.1f} ({r_improved_msg})', bold=True)
self.penumbralFitIterationNumber += 1
self.lastDmetric = d_metric
self.lastRmetric = r_metric
return
def calculatePenumbralMetrics(self, D=None, R=None):
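        """Return mean squared residuals of the data against the model edges.

        For each edge present, the readings spanning the modelled transition
        are compared with intensity_at_time() values from the underlying
        lightcurve and the average squared difference is returned as the
        (d_metric, r_metric) pair; an absent edge yields None.
        """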
d_metric = r_metric = None
time_ranges = self.getUnderlyingLightCurveTimeRanges()
if D is not None:
time_ranges[0] = time_ranges[0] / self.timeDelta + D
time_ranges[1] = time_ranges[1] / self.timeDelta + D
d_metric = 0.0
# print('\nd participants in metric')
# for i in range(int(np.ceil(time_ranges[0])), int(np.ceil(time_ranges[1]))):
# print(i, self.yValues[i], i - D,
# intensity_at_time(self.underlyingLightcurveAns, (i - D) * self.timeDelta, 'D'))
n_vals_in_metric = 0
for i in range(int(np.ceil(time_ranges[0])), int(np.ceil(time_ranges[1]))):
lightcurve_intensity = intensity_at_time(self.underlyingLightcurveAns, (i - D) * self.timeDelta, 'D')
d_metric += (self.yValues[i] - lightcurve_intensity)**2
n_vals_in_metric += 1
d_metric = d_metric / n_vals_in_metric
if R is not None:
time_ranges[2] = time_ranges[2] / self.timeDelta + R
time_ranges[3] = time_ranges[3] / self.timeDelta + R
r_metric = 0.0
# print('\nr participants in metric')
# for i in range(int(np.ceil(time_ranges[2])), int(np.ceil(time_ranges[3]))):
# print(i, self.yValues[i], i - R,
# intensity_at_time(self.underlyingLightcurveAns, (i - R) * self.timeDelta, 'R'))
n_vals_in_metric = 0
for i in range(int(np.ceil(time_ranges[2])), int(np.ceil(time_ranges[3]))):
lightcurve_intensity = intensity_at_time(self.underlyingLightcurveAns, (i - R) * self.timeDelta, 'R')
r_metric += (self.yValues[i] - lightcurve_intensity)**2
n_vals_in_metric += 1
r_metric = r_metric / n_vals_in_metric
return d_metric, r_metric
def doPenumbralFitIterationReport(self):
self.showMsg(f'Penumbral fit iteration {self.penumbralFitIterationNumber}:', bold=True, color='green')
if self.eventType == 'DandR':
self.showMsg(f'(in entryNum units) D: {self.solution[0]:0.4f} R: {self.solution[1]:0.4f}')
elif self.eventType == 'Donly':
self.showMsg(f'(in entryNum units) D: {self.solution[0]:0.4f}')
else: # Ronly
self.showMsg(f'(in entryNum units) R: {self.solution[1]:0.4f}')
self.doLightcurveParameterReport()
return
def doLightcurveParameterReport(self):
if self.enableDiffractionCalculationBox.isChecked():
self.showMsg(f'Diffraction effects included', blankLine=False)
else:
self.showMsg(f'Diffraction effects suppressed', blankLine=False)
self.showMsg(f'dist(AU): {self.asteroidDistanceEdit.text()}', blankLine=False)
self.showMsg(f'speed(km/sec): {self.shadowSpeedEdit.text()}', blankLine=False)
self.showMsg(f'Star diam(mas): {self.starDiameterEdit.text()}', blankLine=False)
self.showMsg(f'D limb angle: {self.dLimbAngle.value()}', blankLine=False)
self.showMsg(f'R limb angle: {self.rLimbAngle.value()}')
return
def rEdgeCorrected(self, r_best_value, r_best_value_index):
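        """Convert an R-region intensity into a sub-reading R edge estimate.

        time_correction() maps the transition-point intensity onto the modelled
        underlying lightcurve; the returned time offset is divided by timeDelta
        and added to the reading index. Returns None when no correction can be
        computed. dEdgeCorrected() below is the D-edge counterpart.
        """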
r_time_corr = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=r_best_value, edge_type='R')
if r_time_corr is not None:
r_delta = r_time_corr / self.timeDelta
r_adj = r_best_value_index + r_delta
return r_adj
else:
return None
def dEdgeCorrected(self, d_best_value, d_best_value_index):
d_time_corr = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=d_best_value, edge_type='D')
if d_time_corr is not None:
d_delta = d_time_corr / self.timeDelta
d_adj = d_best_value_index + d_delta
return d_adj
else:
return None
def getUnderlyingLightCurveTimeRanges(self):
# We use this routine to find the 'range' of times that are covered by the underlying lightcurve.
# The times returned are relative to the geometric edge (i.e., time = 0.00)
B = self.underlyingLightcurveAns['B']
A = self.underlyingLightcurveAns['A']
hi_intensity = B - 0.05 * (B - A)
lo_intensity = A + 0.05 * (B - A)
d_early_time = - time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=hi_intensity, edge_type='D')
d_late_time = - time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=lo_intensity, edge_type='D')
r_early_time = - time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=lo_intensity, edge_type='R')
r_late_time = - time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=hi_intensity, edge_type='R')
return [d_early_time, d_late_time, r_early_time, r_late_time]
def findEvent(self):
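        """Top-level 'find event' handler.

        Validates timeDelta and the min/max event entries, counts candidate
        solutions, runs the appropriate solver (D and R, D only, or R only),
        extracts noise parameters from the first-pass solution, applies the
        sub-frame and underlying-lightcurve timing corrections, and displays
        the adjusted solution. Penumbral fits are routed to doPenumbralFit()
        instead.
        """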
if self.timeDelta == 0.0:
self.showInfo(f'time per reading (timeDelta) has an invalid value of 0.0\n\nCannot proceed.')
return
if self.penumbralFitCheckBox.isChecked():
self.doPenumbralFit()
return
need_to_invite_user_to_verify_timestamps = False
if self.DandR.isChecked():
self.eventType = 'DandR'
self.showMsg('Locate a "D and R" event triggered')
elif self.Donly.isChecked():
self.eventType = 'Donly'
self.showMsg('Locate a "D only" event triggered')
else:
self.eventType = 'Ronly'
self.showMsg('Locate an "R only" event triggered')
minText = self.minEventEdit.text().strip()
maxText = self.maxEventEdit.text().strip()
self.minEvent = None
self.maxEvent = None
if minText and not maxText:
self.showInfo('If minEvent is filled in, so must be maxEvent')
return
if maxText and not minText:
self.showInfo('If maxEvent is filled in, so must be minEvent')
return
if minText:
if not minText.isnumeric():
self.showInfo('Invalid entry for min event (rdgs)')
else:
self.minEvent = int(minText)
if self.minEvent < 1:
self.showInfo('minEvent must be greater than 0')
return
if maxText:
if not maxText.isnumeric():
self.showInfo('Invalid entry for max event (rdgs)')
else:
self.maxEvent = int(maxText)
if self.maxEvent < self.minEvent:
self.showInfo('maxEvent must be >= minEvent')
return
if self.maxEvent > self.right - self.left - 1:
self.showInfo('maxEvent is too large for selected points')
return
if minText == '':
minText = '<blank>'
if maxText == '':
maxText = '<blank>'
self.showMsg('minEvent: ' + minText + ' maxEvent: ' + maxText)
candFrom, numCandidates = candidateCounter(eventType=self.eventType,
dLimits=self.dLimits, rLimits=self.rLimits,
left=self.left, right=self.right,
numPts=self.right - self.left + 1,
minSize=self.minEvent, maxSize=self.maxEvent)
if numCandidates < 0:
self.showInfo('Search parameters are not properly specified')
return
if candFrom == 'usedSize':
self.showMsg('Number of candidate solutions: ' + str(numCandidates) +
' (using event min/max entries)')
else:
self.showMsg(
'Number of candidate solutions: ' + str(numCandidates) +
' (using D/R region selections)')
self.runSolver = True
self.calcErrBars.setEnabled(False)
self.fillExcelReportButton.setEnabled(False)
if self.runSolver:
if self.eventType == 'DandR':
self.showMsg('New solver results...', color='blue', bold=True)
if candFrom == 'usedSize':
solverGen = find_best_event_from_min_max_size(
self.yValues, self.left, self.right,
self.minEvent, self.maxEvent)
else:
solverGen = locate_event_from_d_and_r_ranges(
self.yValues, self.left, self.right, self.dLimits[0],
self.dLimits[1], self.rLimits[0], self.rLimits[1])
elif self.eventType == 'Ronly':
self.showMsg('New solver results...', color='blue', bold=True)
if candFrom == 'usedSize':
pass
else:
self.minEvent = self.rLimits[0] - self.left
self.maxEvent = self.rLimits[1] - self.left
solverGen = \
find_best_r_only_from_min_max_size(
self.yValues, self.left, self.right, self.minEvent,
self.maxEvent)
else: # Donly
self.showMsg('New solver results...', color='blue', bold=True)
if candFrom == 'usedSize':
pass
else:
self.minEvent = self.right - self.dLimits[1]
self.maxEvent = self.right - self.dLimits[0] - 1
solverGen = \
find_best_d_only_from_min_max_size(
self.yValues, self.left, self.right, self.minEvent,
self.maxEvent)
if solverGen is None:
self.showInfo('Generator version not yet implemented')
return
self.cancelRequested = False
d = r = -1
b = a = 0.0
sigmaB = sigmaA = 0.0
for item in solverGen:
# if item[0] == 'fractionDone':
if item[0] == 1.0:
self.progressBar.setValue(int(item[1] * 100))
QtGui.QApplication.processEvents()
if self.cancelRequested:
self.cancelRequested = False
self.runSolver = False
self.showMsg('Solution search was cancelled')
self.progressBar.setValue(0)
return
# elif item[0] == 'no event present':
elif item[0] == -1.0:
self.showMsg(
'No event fitting search criteria could be found.')
self.progressBar.setValue(0)
self.runSolver = False
return
else:
# d, r, b, a, sigmaB, sigmaA, metric = item
_, _, d, r, b, a, sigmaB, sigmaA, metric = item
if d == -1.0:
d = None
if r == -1.0:
r = None
self.solution = (d, r)
self.progressBar.setValue(0)
self.showMsg('Integer (non-subframe) solution...', blankLine=False)
self.showMsg(
'sigB:%.2f sigA:%.2f B:%.2f A:%.2f' %
(sigmaB, sigmaA, b, a),
blankLine=False)
self.displaySolution(subframe=False) # First solution
# This fills in self.sigmaB and self.sigmaA
self.extract_noise_parameters_from_iterative_solution()
subDandR, new_b, new_a = subFrameAdjusted(
eventType=self.eventType, cand=(d, r), B=b, A=a,
sigmaB=self.sigmaB, sigmaA=self.sigmaA, yValues=self.yValues,
left=self.left, right=self.right)
# Here we apply the correction from our computed underlying lightcurves.
self.underlyingLightcurveAns = self.demoUnderlyingLightcurves(baseline=new_b, event=new_a, plots_wanted=False)
# If an error in data entry has occurred, ans will be None
if self.underlyingLightcurveAns is None:
self.showMsg(f'An error in the underlying lightcurve parameters has occurred.', bold=True, color='red')
return
# print(ans)
D = R = 0
if self.eventType == 'Donly' or self.eventType == 'DandR':
D = int(subDandR[0])
# self.showMsg(f'old D(subframe): {subDandR[0]:0.4f}')
if self.eventType == 'Ronly' or self.eventType == 'DandR':
R = int(subDandR[1])
# self.showMsg(f'old R(subframe): {subDandR[1]:0.4f}')
# print(f'D: {D} intensity(D): {self.yValues[D]}')
# print(f'R: {R} intensity(R): {self.yValues[R]}')
if (self.eventType == 'Donly' or self.eventType == 'DandR') and not D == subDandR[0]:
d_time_corr = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=self.yValues[D], edge_type='D')
d_delta = d_time_corr / self.timeDelta
d_adj = D + d_delta
# self.showMsg(f'd_time_correction: {d_time_corr:0.4f} new D: {d_adj:0.4f}')
subDandR[0] = d_adj
if (self.eventType == 'Ronly' or self.eventType == 'DandR') and not R == subDandR[1]:
r_time_corr = time_correction(correction_dict=self.underlyingLightcurveAns,
transition_point_intensity=self.yValues[R], edge_type='R')
r_delta = r_time_corr / self.timeDelta
r_adj = R + r_delta
# self.showMsg(f'r_time_correction: {r_time_corr:0.4f} new R: {r_adj:0.4f}')
subDandR[1] = r_adj
self.solution = subDandR
self.showMsg('Subframe adjusted solution...', blankLine=False)
self.showMsg(
'sigB:%.2f sigA:%.2f B:%.2f A:%.2f' %
(self.sigmaB, self.sigmaA, new_b, new_a),
blankLine=False)
need_to_invite_user_to_verify_timestamps = self.displaySolution() # Adjusted solution
self.B = new_b
self.A = new_a
# Activate this code if not using old solver following this.
if self.only_new_solver_wanted:
self.dRegion = None
self.rRegion = None
self.dLimits = None
self.rLimits = None
self.showMsg('... end New solver results', color='blue', bold=True)
if not self.only_new_solver_wanted:
# Proceed with old dual-pass 'solver'
self.solution = (None, None)
self.try_to_get_solution()
if self.solution:
self.showMsg(
'sigB:%.2f sigA:%.2f B:%.2f A:%.2f' %
(self.sigmaB, self.sigmaA, self.B, self.A),
blankLine=False)
self.displaySolution()
self.dRegion = None
self.rRegion = None
self.dLimits = None
self.rLimits = None
if self.runSolver and self.solution:
            D, R = self.solution  # D and R are floats (or None), possibly fractional from sub-frame timing
if D is not None:
D = round(D, 4)
if R is not None:
R = round(R, 4)
self.solution = (D, R)
if self.eventType == 'DandR':
# ans = '(%.2f,%.2f) B: %.2f A: %.2f' % (D, R, self.B, self.A)
# Check for solution search based on min max event limits
if self.maxEvent is not None:
if (R - D) > self.maxEvent:
self.reDrawMainPlot()
self.showMsg('Invalid solution: max event limit constrained solution', color='red', bold=True)
self.showInfo('The solution is likely incorrect because the max event limit' +
' was set too low. Increase that limit and try again.')
return
if self.minEvent >= (R - D):
self.reDrawMainPlot()
self.showMsg('Invalid solution: min event limit constrained solution!', color='red', bold=True)
self.showInfo('The solution is likely incorrect because the min event limit' +
' was set too high. Decrease that limit and try again.')
return
pass
elif self.eventType == 'Donly':
# ans = '(%.2f,None) B: %.2f A: %.2f' % (D, self.B, self.A)
pass
elif self.eventType == 'Ronly':
# ans = '(None,%.2f) B: %.2f A: %.2f' % (R, self.B, self.A)
pass
else:
raise Exception('Undefined event type')
# self.showMsg('Raw solution (debug output): ' + ans)
elif self.runSolver:
self.showMsg('Event could not be found')
self.reDrawMainPlot()
self.calcErrBars.setEnabled(True)
if need_to_invite_user_to_verify_timestamps:
self.showInfo(f'The timing of the event found depends on the correctness '
f'of the timestamp assigned to the D and R frames. Since '
f'OCR may have produced incorrect values, the relevant video frames have been found '
f'and displayed for your inspection.\n\n'
f'Please verify visually that the timestamp values are correct.\n\n'
f'If they are wrong, note the correct values and use manual timestamp entry '
f'to "rescue" the observation.')
def fillTableViewOfData(self):
self.table.setRowCount(self.dataLen)
self.table.setVerticalHeaderLabels([str(i) for i in range(self.dataLen)])
# print(self.yFrame[0], self.yFrame[-1])
min_frame = int(trunc(float(self.yFrame[0])))
max_frame = int(trunc(float(self.yFrame[-1])))
# print(min_frame, max_frame)
if self.frameNumSpinBox.isEnabled():
self.frameNumSpinBox.setMinimum(min_frame)
self.frameNumSpinBox.setMaximum(max_frame)
for i in range(self.dataLen):
# newitem = QtGui.QTableWidgetItem(str(i))
# self.table.setItem(i, 0, newitem)
neatStr = fp.to_precision(self.yValues[i], 6)
newitem = QtGui.QTableWidgetItem(str(neatStr))
self.table.setItem(i, 2, newitem)
newitem = QtGui.QTableWidgetItem(str(self.yTimes[i]))
self.table.setItem(i, 1, newitem)
frameNum = float(self.yFrame[i])
if not np.ceil(frameNum) == np.floor(frameNum):
self.fieldMode = True
newitem = QtGui.QTableWidgetItem(str(self.yFrame[i]))
self.table.setItem(i, 0, newitem)
if len(self.LC2) > 0:
neatStr = fp.to_precision(self.LC2[i], 6)
newitem = QtGui.QTableWidgetItem(str(neatStr))
self.table.setItem(i, 3, newitem)
if len(self.LC3) > 0:
neatStr = fp.to_precision(self.LC3[i], 6)
newitem = QtGui.QTableWidgetItem(str(neatStr))
self.table.setItem(i, 4, newitem)
if len(self.LC4) > 0:
neatStr = fp.to_precision(self.LC4[i], 6)
newitem = QtGui.QTableWidgetItem(str(neatStr))
self.table.setItem(i, 5, newitem)
if len(self.extra) > 0:
for k, lightcurve in enumerate(self.extra):
neatStr = fp.to_precision(lightcurve[i], 6)
newitem = QtGui.QTableWidgetItem(str(neatStr))
self.table.setItem(i, 6 + k, newitem)
self.table.resizeColumnsToContents()
self.writeCSVButton.setEnabled(True)
def doManualTimestampEntry(self):
errmsg = ''
while errmsg != 'ok':
errmsg, manualTime, dataEntered, actualFrameCount, expectedFrameCount = \
manualTimeStampEntry(self.yFrame, TSdialog(), self.flashEdges)
if errmsg != 'ok':
if errmsg == 'cancelled':
return
else:
self.showInfo(errmsg)
else:
self.showMsg(dataEntered, bold=True)
if abs(actualFrameCount - expectedFrameCount) >= 0.12:
msg = (
f'Possible dropped readings !!!\n\n'
f'Reading count input: {actualFrameCount:.2f} \n\n'
f'Reading count computed from frame rate: {expectedFrameCount:.2f}'
)
self.showMsg(msg, color='red', bold=True)
self.showInfo(msg)
# If user cancelled out of timestamp entry dialog,
# then manualTime will be an empty list.
if manualTime:
self.yTimes = manualTime[:]
self.timeDelta, self.outliers, self.errRate = getTimeStepAndOutliers(
self.yTimes)
self.expDurEdit.setText(fp.to_precision(self.timeDelta, 6))
self.fillTableViewOfData()
self.reDrawMainPlot()
self.showMsg(
'timeDelta: ' + fp.to_precision(self.timeDelta, 6) +
' seconds per reading' +
' (timeDelta calculated from manual input timestamps)',
blankLine=False)
self.showMsg(
'timestamp error rate: ' + fp.to_precision(100 *
self.errRate,
3) + '%')
self.fillTableViewOfData()
def enableDisableFrameViewControls(self, state_to_set):
self.viewFrameButton.setEnabled(state_to_set)
self.frameNumSpinBox.setEnabled(state_to_set)
self.fieldViewCheckBox.setEnabled(state_to_set)
self.flipXaxisCheckBox.setEnabled(state_to_set)
self.flipYaxisCheckBox.setEnabled(state_to_set)
def readDataFromFile(self):
self.initializeVariablesThatDontDependOnAfile()
self.blockSize = 1
self.fieldMode = False
self.pathToVideo = None
self.enableDisableFrameViewControls(state_to_set=False)
self.disableAllButtons()
self.mainPlot.clear()
self.textOut.clear()
self.initializeTableView()
if self.externalCsvFilePath is None:
# Open a file select dialog
self.filename, _ = QFileDialog.getOpenFileName(
self, # parent
"Select light curve csv file", # title for dialog
self.settings.value('lightcurvedir', ""), # starting directory
"Csv files (*.csv)")
else:
self.filename = self.externalCsvFilePath
self.externalCsvFilePath = None
if self.filename:
self.setWindowTitle('PYOTE Version: ' + version.version() + ' File being processed: ' + self.filename)
dirpath, _ = os.path.split(self.filename)
self.logFile, _ = os.path.splitext(self.filename)
self.logFile = self.logFile + '.PYOTE.log'
curDateTime = datetime.datetime.today().ctime()
self.showMsg('')
self.showMsg('#' * 20 + ' PYOTE ' + version.version() + ' session started: ' + curDateTime + ' ' + '#' * 20)
# Make the directory 'sticky'
self.settings.setValue('lightcurvedir', dirpath)
self.settings.sync()
self.showMsg('filename: ' + self.filename, bold=True, color="red")
try:
self.outliers = []
frame, time, value, self.secondary, self.ref2, self.ref3, self.extra, \
self.aperture_names, self.headers = readLightCurve(self.filename)
values = [float(item) for item in value]
self.yValues = np.array(values) # yValues = curve to analyze
self.dataLen = len(self.yValues)
self.LC1 = np.array(values)
# Check headers to see if this is a PyMovie file. Grab the
# path to video file if it is a PyMovie file
for header in self.headers:
if header.startswith('# PyMovie') or header.startswith('Limovie'):
for line in self.headers:
if line.startswith('# source:') or line.startswith('"FileName :'):
if line.startswith('# source:'): # PyMovie format
self.pathToVideo = line.replace('# source:', '', 1).strip()
if line.startswith('"FileName :'): # Limovie format
self.pathToVideo = line.replace('"FileName :', '', 1).strip()
self.pathToVideo = self.pathToVideo.strip('"')
if os.path.isfile(self.pathToVideo):
_, ext = os.path.splitext(self.pathToVideo)
if ext == '.avi':
ans = readAviFile(0, self.pathToVideo)
if not ans['success']:
self.showMsg(
                                f'Attempt to read .avi file gave errmsg: {ans["errmsg"]}',
color='red', bold=True)
self.pathToVideo = None
else:
self.showMsg(f'fourcc code of avi: {ans["fourcc"]}', blankLine=False)
self.showMsg(f'fps: {ans["fps"]}', blankLine=False)
self.showMsg(f'avi contains {ans["num_frames"]} frames')
# Enable frame view controls
self.enableDisableFrameViewControls(state_to_set=True)
elif ext == '.ser':
ans = readSerFile(0, self.pathToVideo)
if not ans['success']:
self.showMsg(
                                f'Attempt to read .ser file gave errmsg: {ans["errmsg"]}',
color='red', bold=True)
self.pathToVideo = None
else:
# Enable frame view controls
self.enableDisableFrameViewControls(state_to_set=True)
elif ext == '':
ans = readFitsFile(0, self.pathToVideo)
if not ans['success']:
self.showMsg(
                                f'Attempt to read FITS folder gave errmsg: {ans["errmsg"]}',
color='red', bold=True)
self.pathToVideo = None
else:
# Enable frame view controls
self.showMsg(f'{ans["num_frames"]} .fits files were found in FITS folder')
self.enableDisableFrameViewControls(state_to_set=True)
elif ext == '.adv':
# For now we assume that .adv files have embedded timestamps and
# so there is no need to display frames for visual OCR verification
self.pathToVideo = None
elif ext == '.aav':
ans = readAavFile(0, self.pathToVideo)
if not ans['success']:
self.showMsg(
                                f'Attempt to read .aav file gave errmsg: {ans["errmsg"]}',
color='red', bold=True)
self.pathToVideo = None
else:
# Enable frame view controls
self.enableDisableFrameViewControls(state_to_set=True)
else:
self.showMsg(f'Unexpected file type of {ext} found.')
else:
self.showMsg(f'video source file {self.pathToVideo} could not be found.')
self.pathToVideo = None
# Automatically select all points
# noinspection PyUnusedLocal
self.yStatus = [INCLUDED for _i in range(self.dataLen)]
refStar = [float(item) for item in self.secondary]
vals = [float(item) for item in self.secondary]
self.LC2 = np.array(vals)
vals = [float(item) for item in self.ref2]
self.LC3 = np.array(vals)
vals = [float(item) for item in self.ref3]
self.LC4 = np.array(vals)
# A pymovie csv file can have more than 4 lightcurves. The 'extra' lightcurves beyond
# the standard max of 4 are placed in self.extra which is an array of lightcurves
if self.extra:
for i, light_curve in enumerate(self.extra):
vals = [float(item) for item in light_curve]
self.extra[i] = np.array(vals[:])
self.initializeTableView()
self.secondarySelector.setValue(1)
self.curveToAnalyzeSpinBox.setMaximum(1)
if self.secondary:
self.secondarySelector.setEnabled(True)
self.normLabel.setEnabled(True)
self.secondarySelector.setMaximum(2)
self.secondarySelector.setValue(2)
self.showSecondaryCheckBox.setEnabled(True)
self.showSecondaryCheckBox.setChecked(False)
self.curveToAnalyzeSpinBox.setMaximum(2)
if self.ref2:
self.secondarySelector.setEnabled(True)
self.secondarySelector.setMaximum(3)
self.curveToAnalyzeSpinBox.setMaximum(3)
if self.ref3:
self.secondarySelector.setMaximum(4)
self.curveToAnalyzeSpinBox.setMaximum(4)
if self.extra:
self.secondarySelector.setMaximum(4 + len(self.extra))
self.curveToAnalyzeSpinBox.setMaximum(4 + len(self.extra))
self.lightCurveNumberLabel.setEnabled(True)
self.curveToAnalyzeSpinBox.setEnabled(True)
# If no timestamps were found in the input file, prompt for manual entry
if self.timestampListIsEmpty(time):
self.showMsg('Manual entry of timestamps is required.',
bold=True)
                    # If the user knew there were no timestamps, there is no
                    # reason to show the info box.
if not self.manualTimestampCheckBox.isChecked():
self.showInfo('This file does not contain timestamp '
'entries --- manual entry of two '
'timestamps is required.'
'\n\nEnter the timestamp '
'values that the avi '
'processing software (Limovie, Tangra, '
'etc) would have produced '
                                      'had the OCR process not failed, using the '
'View frame button to display the frames '
'you want to use for timestamp purposes.\n\n'
'By working in this manner, you can continue '
'processing the file as though OCR had '
'succeeded and then follow the standard '
'procedure for reporting results through '
'the IOTA event reporting spreadsheet ('
'which will make the needed corrections for camera delay and VTI offset).')
self.showMsg('=' * 20 + ' file header lines ' + '=' * 20, bold=True, blankLine=False)
for item in self.headers:
self.showMsg(item, blankLine=False)
self.showMsg('=' * 20 + ' end header lines ' + '=' * 20, bold=True)
self.yTimes = time[:]
self.yValues = np.array(values)
self.yValCopy = np.ndarray(shape=(len(self.yValues),))
np.copyto(self.yValCopy, self.yValues)
self.yRefStar = np.array(refStar)
self.yRefStarCopy = np.array(refStar)
if self.yRefStar.size > 0:
self.smoothSecondaryButton.setEnabled(True)
self.numSmoothPointsEdit.setEnabled(True)
self.dataLen = len(self.yValues)
self.yFrame = frame[:]
# Automatically select all points
# noinspection PyUnusedLocal
self.yStatus = [INCLUDED for _i in range(self.dataLen)]
self.left = 0
self.right = self.dataLen - 1
self.mainPlot.autoRange()
self.mainPlot.setMouseEnabled(x=True, y=False)
self.setDataLimits.setEnabled(True)
self.writePlot.setEnabled(True)
self.markDzone.setEnabled(True)
self.markRzone.setEnabled(True)
self.calcFlashEdge.setEnabled(True)
self.minEventEdit.setEnabled(True)
self.maxEventEdit.setEnabled(True)
self.locateEvent.setEnabled(True)
self.firstPassPenumbralFit = True
self.doBlockIntegration.setEnabled(True)
self.startOver.setEnabled(True)
self.fillTableViewOfData()
self.timeDelta, self.outliers, self.errRate = getTimeStepAndOutliers(self.yTimes)
self.expDurEdit.setText(fp.to_precision(self.timeDelta, 6))
self.showMsg('timeDelta: ' + fp.to_precision(self.timeDelta, 6) + ' seconds per reading', blankLine=False)
self.showMsg('timestamp error rate: ' + fp.to_precision(100 *
self.errRate, 3) + '%')
if self.outliers:
self.showTimestampErrors.setEnabled(True)
self.showTimestampErrors.setChecked(True)
self.reDrawMainPlot()
self.mainPlot.autoRange()
if self.timeDelta == 0.0 and not self.manualTimestampCheckBox.isChecked():
self.showInfo("Analysis of timestamp fields resulted in "
"an invalid timeDelta of 0.0\n\nSuggestion: Enable manual timestamp entry (checkbox at top center)"
", then press the now active 'Manual timestamp entry' button."
"\n\nThis will give you a chance to "
"manually correct the timestamps using "
"the data available in the table in the "
"lower left corner or incorporate flash timing data.")
except Exception as e:
self.showMsg(str(e))
def illustrateTimestampOutliers(self):
for pos in self.outliers:
vLine = pg.InfiniteLine(pos=pos+0.5, pen=(255, 0, 0))
self.mainPlot.addItem(vLine)
def prettyPrintCorCoefs(self):
outStr = 'noise corr coefs: ['
posCoefs = []
for coef in self.corCoefs:
if coef < acfCoefThreshold:
break
posCoefs.append(coef)
for i in range(len(posCoefs)-1):
outStr = outStr + fp.to_precision(posCoefs[i], 3) + ', '
outStr = outStr + fp.to_precision(posCoefs[-1], 3)
outStr = outStr + '] (based on ' + str(self.numPtsInCorCoefs) + ' points)'
outStr = outStr + ' sigmaB: ' + fp.to_precision(self.sigmaB, 4)
self.showMsg(outStr)
def processEventNoise(self, secondPass=False):
if len(self.selectedPoints) != 2:
self.showInfo('Exactly two points must be selected for this operation')
return
selPts = self.selectedPoints.keys()
left = int(min(selPts))
right = int(max(selPts))
if (right - left) < 9:
if secondPass:
self.removePointSelections()
self.sigmaA = self.sigmaB
return
else:
self.showInfo('At least 10 points must be included.')
return
if left < self.left or right > self.right:
self.showInfo('Selection point(s) outside of included data points')
self.removePointSelections()
return
else:
self.eventXvals = []
self.eventYvals = []
for i in range(left, right+1):
self.eventXvals.append(i)
self.eventYvals.append(self.yValues[i])
self.showSelectedPoints('Points selected for event noise '
'analysis: ')
# self.doNoiseAnalysis.setEnabled(True)
# self.computeSigmaA.setEnabled(True)
self.removePointSelections()
_, self.numNApts, self.sigmaA = getCorCoefs(self.eventXvals, self.eventYvals)
self.showMsg('Event noise analysis done using ' + str(self.numNApts) +
' points --- sigmaA: ' + fp.to_precision(self.sigmaA, 4))
self.reDrawMainPlot()
def processEventNoiseFromIterativeSolution(self, left, right):
if (right - left) < 9:
return
assert left >= self.left
assert right <= self.right
self.eventXvals = []
self.eventYvals = []
for i in range(left, right + 1):
self.eventXvals.append(i)
self.eventYvals.append(self.yValues[i])
_, self.numNApts, self.sigmaA = getCorCoefs(self.eventXvals,
self.eventYvals)
def processBaselineNoise(self, secondPass=False):
if len(self.selectedPoints) != 2:
self.showInfo('Exactly two points must be selected for this operation')
return
selPts = self.selectedPoints.keys()
left = int(min(selPts))
right = int(max(selPts))
if (right - left) < 14:
if secondPass:
self.removePointSelections()
return
else:
self.showInfo('At least 15 points must be included.')
return
if left < self.left or right > self.right:
self.showInfo('Selection point(s) outside of included data points')
return
else:
self.baselineXvals = []
self.baselineYvals = []
for i in range(left, right+1):
self.baselineXvals.append(i)
self.baselineYvals.append(self.yValues[i])
self.showSelectedPoints('Points selected for baseline noise '
'analysis: ')
# self.doNoiseAnalysis.setEnabled(True)
# self.computeSigmaA.setEnabled(True)
self.removePointSelections()
self.newCorCoefs, self.numNApts, sigB = getCorCoefs(self.baselineXvals, self.baselineYvals)
self.showMsg('Baseline noise analysis done using ' + str(self.numNApts) +
' baseline points')
if len(self.corCoefs) == 0:
self.corCoefs = np.ndarray(shape=(len(self.newCorCoefs),))
np.copyto(self.corCoefs, self.newCorCoefs)
self.numPtsInCorCoefs = self.numNApts
self.sigmaB = sigB
else:
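            # Pool the new estimates with those already accumulated, weighting each by the
            # number of points it was computed from (a running weighted average).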
totalPoints = self.numNApts + self.numPtsInCorCoefs
self.corCoefs = (self.corCoefs * self.numPtsInCorCoefs +
self.newCorCoefs * self.numNApts) / totalPoints
self.sigmaB = (self.sigmaB * self.numPtsInCorCoefs +
sigB * self.numNApts) / totalPoints
self.numPtsInCorCoefs = totalPoints
self.prettyPrintCorCoefs()
# Try to warn user about the possible need for block integration by testing the lag 1
# and lag 2 correlation coefficients. The tests are just guesses on my part, so only
        # warnings are given. Later, the Cholesky decomposition may fail because block integration
        # was really needed. That is a fatal error, but it is trapped and the user is alerted to the problem.
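        # A hedged reading of the thresholds used below (heuristics, not derived limits):
        # corCoefs[1] is the lag-1 autocorrelation of the baseline noise, and a value of 0.7 or
        # more suggests consecutive readings are near-copies of each other (e.g. a camera that
        # integrates several video frames into one output reading); corCoefs[2] >= 0.3 is a
        # weaker hint of the same effect reaching to lag 2.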
if len(self.corCoefs) > 1:
if self.corCoefs[1] >= 0.7:
self.showInfo('The auto-correlation coefficient at lag 1 is suspiciously large. '
'This may be because the light curve needs some degree of block integration. '
'Failure to do a needed block integration allows point-to-point correlations caused by '
'the camera integration to artificially induce non-physical correlated noise.')
elif len(self.corCoefs) > 2:
if self.corCoefs[2] >= 0.3:
self.showInfo('The auto-correlation coefficient at lag 2 is suspiciously large. '
'This may be because the light curve needs some degree of block integration. '
'Failure to do a needed block integration allows point-to-point correlations caused by '
'the camera integration to artificially induce non-physical correlated noise.')
if self.sigmaA is None:
self.sigmaA = self.sigmaB
self.reDrawMainPlot()
self.locateEvent.setEnabled(True)
self.markDzone.setEnabled(True)
self.markRzone.setEnabled(True)
self.minEventEdit.setEnabled(True)
self.maxEventEdit.setEnabled(True)
def processBaselineNoiseFromIterativeSolution(self, left, right):
# if (right - left) < 14:
# return 'Failed'
assert left >= self.left
assert right <= self.right
self.baselineXvals = []
self.baselineYvals = []
for i in range(left, right + 1):
self.baselineXvals.append(i)
self.baselineYvals.append(self.yValues[i])
self.newCorCoefs, self.numNApts, sigB = getCorCoefs(self.baselineXvals,
self.baselineYvals)
if len(self.corCoefs) == 0:
self.corCoefs = np.ndarray(shape=(len(self.newCorCoefs),))
np.copyto(self.corCoefs, self.newCorCoefs)
self.numPtsInCorCoefs = self.numNApts
self.sigmaB = sigB
else:
totalPoints = self.numNApts + self.numPtsInCorCoefs
self.corCoefs = (self.corCoefs * self.numPtsInCorCoefs +
self.newCorCoefs * self.numNApts) / totalPoints
self.sigmaB = (self.sigmaB * self.numPtsInCorCoefs +
sigB * self.numNApts) / totalPoints
self.numPtsInCorCoefs = totalPoints
def removePointSelections(self):
for i, oldStatus in self.selectedPoints.items():
self.yStatus[i] = oldStatus
self.selectedPoints = {}
def disableAllButtons(self):
self.calcFlashEdge.setEnabled(False)
self.lightCurveNumberLabel.setEnabled(False)
self.normLabel.setEnabled(False)
self.curveToAnalyzeSpinBox.setEnabled(False)
self.showSecondaryCheckBox.setEnabled(False)
self.secondarySelector.setEnabled(False)
self.normalizeButton.setEnabled(False)
self.smoothSecondaryButton.setEnabled(False)
self.numSmoothPointsEdit.setEnabled(False)
self.setDataLimits.setEnabled(False)
self.doBlockIntegration.setEnabled(False)
self.locateEvent.setEnabled(False)
self.calcErrBars.setEnabled(False)
self.fillExcelReportButton.setEnabled(False)
self.startOver.setEnabled(False)
self.markDzone.setEnabled(False)
self.markRzone.setEnabled(False)
self.numSmoothPointsEdit.setEnabled(False)
self.minEventEdit.setEnabled(False)
self.maxEventEdit.setEnabled(False)
self.writeBarPlots.setEnabled(False)
self.writeCSVButton.setEnabled(False)
# noinspection PyUnusedLocal
def restart(self):
savedFlashEdges = self.flashEdges
self.initializeVariablesThatDontDependOnAfile()
self.flashEdges = savedFlashEdges
self.disableAllButtons()
self.firstPassPenumbralFit = True
self.lightCurveNumberLabel.setEnabled(True)
self.curveToAnalyzeSpinBox.setEnabled(True)
self.normLabel.setEnabled(True)
if self.errBarWin:
self.errBarWin.close()
self.dataLen = len(self.yTimes)
self.timeDelta, self.outliers, self.errRate = getTimeStepAndOutliers(self.yTimes)
self.expDurEdit.setText(fp.to_precision(self.timeDelta, 6))
self.fillTableViewOfData()
if len(self.yRefStar) > 0:
self.showSecondaryCheckBox.setEnabled(True)
self.smoothSecondaryButton.setEnabled(True)
self.numSmoothPointsEdit.setEnabled(True)
self.secondarySelector.setEnabled(True)
# Enable the initial set of buttons (allowed operations)
self.startOver.setEnabled(True)
self.setDataLimits.setEnabled(True)
self.markDzone.setEnabled(True)
self.markRzone.setEnabled(True)
self.locateEvent.setEnabled(True)
self.minEventEdit.setEnabled(True)
self.maxEventEdit.setEnabled(True)
# Reset the data plot so that all points are visible
self.mainPlot.autoRange()
# Show all data points as INCLUDED
self.yStatus = [INCLUDED for _i in range(self.dataLen)]
# Set the 'left' and 'right' edges of 'included' data to 'all'
self.left = 0
self.right = self.dataLen - 1
self.minEventEdit.clear()
self.maxEventEdit.clear()
self.reDrawMainPlot()
self.mainPlot.autoRange()
self.showMsg('*' * 20 + ' starting over ' + '*' * 20, color='blue')
def drawSolution(self):
def plot(x, y, pen):
self.mainPlot.plot(x, y, pen=pen, symbol=None)
def plotDcurve():
# The units of self.timeDelta are seconds per entry, so the conversion in the next line
# gets us x_values[] converted to entryNum units.
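            # For illustration: with timeDelta = 0.5 seconds per reading, a model point 2.0
            # seconds after geometric D lands 4 readings to the right of D on the x axis.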
x_values = self.underlyingLightcurveAns['time deltas'] / self.timeDelta
x_values += D
y_values = self.underlyingLightcurveAns['D curve']
if self.underlyingLightcurveAns['star D'] is not None:
z_values = self.underlyingLightcurveAns['star D']
else:
z_values = self.underlyingLightcurveAns['raw D']
x_trimmed = []
y_trimmed = []
z_trimmed = []
for i in range(x_values.size):
if self.left <= x_values[i] <= max_x:
x_trimmed.append(x_values[i])
y_trimmed.append(y_values[i])
z_trimmed.append(z_values[i])
# (150, 100, 100) is the brownish color we use to show the underlying lightcurve
if self.showUnderlyingLightcurveCheckBox.isChecked():
plot(x_trimmed, z_trimmed, pen=pg.mkPen((150, 100, 100), width=self.lineWidthSpinner.value()))
# Now overplot with the blue camera response curve
plot(x_trimmed, y_trimmed, pen=pg.mkPen((0, 0, 255), width=self.lineWidthSpinner.value()))
# Extend camera response to the left and right if necessary...
if x_trimmed:
if x_trimmed[0] > self.left:
plot([self.left, x_trimmed[0]], [y_trimmed[0], y_trimmed[0]], pen=pg.mkPen((0, 0, 255), width=self.lineWidthSpinner.value()))
if x_trimmed[-1] < max_x:
plot([x_trimmed[-1], max_x], [y_trimmed[-1], y_trimmed[-1]], pen=pg.mkPen((0, 0, 255), width=self.lineWidthSpinner.value()))
def plotRcurve():
# The units of self.timeDelta are seconds per entry, so the conversion in the next line
# gets us x_values[] converted to entryNum units.
x_values = self.underlyingLightcurveAns['time deltas'] / self.timeDelta
x_values += R
y_values = self.underlyingLightcurveAns['R curve']
if self.underlyingLightcurveAns['star R'] is not None:
z_values = self.underlyingLightcurveAns['star R']
else:
z_values = self.underlyingLightcurveAns['raw R']
x_trimmed = []
y_trimmed = []
z_trimmed = []
for i in range(x_values.size):
if min_x <= x_values[i] <= self.right:
x_trimmed.append(x_values[i])
y_trimmed.append(y_values[i])
z_trimmed.append(z_values[i])
# (150, 100, 100) is the brownish color we use to show the underlying lightcurve
if self.showUnderlyingLightcurveCheckBox.isChecked():
plot(x_trimmed, z_trimmed, pen=pg.mkPen((150, 100, 100), width=self.lineWidthSpinner.value()))
# Now overplot with the blue camera response curve
plot(x_trimmed, y_trimmed, pen=pg.mkPen((0, 0, 255), width=self.lineWidthSpinner.value()))
# Extend camera response to the left and right if necessary...
if x_trimmed:
if x_trimmed[0] > min_x:
plot([min_x, x_trimmed[0]], [y_trimmed[0], y_trimmed[0]], pen=pg.mkPen((0, 0, 255), width=self.lineWidthSpinner.value()))
if x_trimmed[-1] < self.right:
plot([x_trimmed[-1], self.right], [y_trimmed[-1], y_trimmed[-1]], pen=pg.mkPen((0, 0, 255), width=self.lineWidthSpinner.value()))
def plotGeometricShadowAtD():
if self.showEdgesCheckBox.isChecked():
pen = pg.mkPen(color=(255, 0, 0), style=QtCore.Qt.DashLine, width=self.lineWidthSpinner.value())
self.mainPlot.plot([D, D], [lo_int, hi_int], pen=pen, symbol=None)
def plotGeometricShadowAtR():
if self.showEdgesCheckBox.isChecked():
pen = pg.mkPen(color=(0, 200, 0), style=QtCore.Qt.DashLine, width=self.lineWidthSpinner.value())
self.mainPlot.plot([R, R], [lo_int, hi_int], pen=pen, symbol=None)
hi_int = max(self.yValues[self.left:self.right])
lo_int = min(self.yValues[self.left:self.right])
if self.eventType == 'DandR':
D = self.solution[0]
R = self.solution[1]
max_x = min_x = (D + R) / 2.0
plotDcurve()
plotGeometricShadowAtD()
plotRcurve()
plotGeometricShadowAtR()
elif self.eventType == 'Donly':
D = self.solution[0]
max_x = self.right
plotDcurve()
plotGeometricShadowAtD()
elif self.eventType == 'Ronly':
R = self.solution[1]
min_x = self.left
plotRcurve()
plotGeometricShadowAtR()
else:
raise Exception('Unrecognized event type of |' + self.eventType + '|')
def calcNumBandApoints(self):
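        # nBpts / nApts are the number of baseline and event (bottom) points implied by the
        # current solution; both are clamped to a minimum of 1 below.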
if self.eventType == 'Donly':
self.nBpts = self.solution[0] - self.left
self.nApts = self.right - self.solution[0] - 1
if self.eventType == 'Ronly':
self.nBpts = self.right - self.solution[1]
self.nApts = self.solution[1] - self.left
if self.eventType == 'DandR':
self.nBpts = self.right - self.solution[1] + self.solution[0] - self.left
self.nApts = self.solution[1] - self.solution[0] - 1
if self.nBpts < 1:
self.nBpts = 1
if self.nApts < 1:
self.nApts = 1
def drawEnvelope(self):
# (150, 100, 100) is the brownish color we use to show the underlying lightcurve
# def plot(x, y):
# self.mainPlot.plot(x, y, pen=pg.mkPen((150, 100, 100), width=2), symbol=None)
def plotGeometricShadowAtD(d):
if self.showErrBarsCheckBox.isChecked():
pen = pg.mkPen(color=(255, 0, 0), style=QtCore.Qt.DotLine, width=self.lineWidthSpinner.value())
self.mainPlot.plot([d, d], [lo_int, hi_int], pen=pen, symbol=None)
def plotGeometricShadowAtR(r):
if self.showErrBarsCheckBox.isChecked():
pen = pg.mkPen(color=(0, 200, 0), style=QtCore.Qt.DotLine, width=self.lineWidthSpinner.value())
self.mainPlot.plot([r, r], [lo_int, hi_int], pen=pen, symbol=None)
if self.solution is None:
return
# Make shortened geometric shadow markers to distinguish the error bar versions from the central value
hi_int = max(self.yValues[self.left:self.right])
lo_int = min(self.yValues[self.left:self.right])
delta_int = (hi_int - lo_int) * 0.1
hi_int -= delta_int
lo_int += delta_int
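        # The markers are shortened by 10% at each end so the dotted error-bar edges stay
        # visually distinct from the full-height dashed markers drawn at the central solution.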
if self.eventType == 'Donly':
D = self.solution[0]
Dright = D + self.plusD
Dleft = D - self.minusD
plotGeometricShadowAtD(Dright)
plotGeometricShadowAtD(Dleft)
return
if self.eventType == 'Ronly':
R = self.solution[1]
Rright = R + self.plusR
Rleft = R - self.minusR
plotGeometricShadowAtR(Rright)
plotGeometricShadowAtR(Rleft)
return
if self.eventType == 'DandR':
R = self.solution[1]
D = self.solution[0]
Rright = R + self.plusR
Rleft = R - self.minusR
Dright = D + self.plusD
Dleft = D - self.minusD
plotGeometricShadowAtR(Rright)
plotGeometricShadowAtR(Rleft)
plotGeometricShadowAtD(Dright)
plotGeometricShadowAtD(Dleft)
return
def reDrawMainPlot(self):
self.mainPlot.clear()
if self.yValues is None:
return
if self.showTimestampErrors.checkState():
self.illustrateTimestampOutliers()
# # Automatically show timestamp errors at final report
# if self.minusD is not None or self.minusR is not None:
# self.illustrateTimestampOutliers()
self.mainPlot.addItem(self.verticalCursor)
self.mainPlot.plot(self.yValues)
try:
x = [i for i in range(self.dataLen) if self.yStatus[i] == INCLUDED]
y = [self.yValues[i] for i in range(self.dataLen) if self.yStatus[i] == INCLUDED]
self.mainPlot.plot(x, y, pen=None, symbol='o',
symbolBrush=(0, 0, 255), symbolSize=6)
x = [i for i in range(self.dataLen) if self.yStatus[i] == BASELINE]
y = [self.yValues[i] for i in range(self.dataLen) if self.yStatus[i] == BASELINE]
self.mainPlot.plot(x, y, pen=None, symbol='o',
symbolBrush=(0, 200, 200), symbolSize=6)
x = [i for i in range(self.dataLen) if self.yStatus[i] == SELECTED]
y = [self.yValues[i] for i in range(self.dataLen) if self.yStatus[i] == SELECTED]
self.mainPlot.plot(x, y, pen=None, symbol='o',
symbolBrush=(255, 0, 0), symbolSize=10)
except IndexError:
pass
if self.showSecondaryCheckBox.isChecked() and len(self.yRefStar) == self.dataLen:
self.mainPlot.plot(self.yRefStar)
if self.right is not None:
right = min(self.dataLen, self.right+1)
else:
right = self.dataLen
if self.left is None:
left = 0
else:
left = self.left
x = [i for i in range(left, right)]
            y = [self.yRefStar[i] for i in range(left, right)]
self.mainPlot.plot(x, y, pen=None, symbol='o',
symbolBrush=(0, 255, 0), symbolSize=6)
if len(self.smoothSecondary) > 0:
self.mainPlot.plot(x, self.smoothSecondary,
pen=pg.mkPen((100, 100, 100), width=4), symbol=None)
if self.dRegion is not None:
self.mainPlot.addItem(self.dRegion)
if self.rRegion is not None:
self.mainPlot.addItem(self.rRegion)
if self.solution:
self.drawSolution()
if self.minusD is not None or self.minusR is not None:
# We have data for drawing an envelope
self.drawEnvelope()
def showSelectedPoints(self, header):
selPts = list(self.selectedPoints.keys())
selPts.sort()
self.showMsg(header + str(selPts))
def doTrim(self):
if len(self.selectedPoints) != 0:
if len(self.selectedPoints) != 2:
self.showInfo('Exactly two points must be selected for a trim operation')
return
self.showSelectedPoints('Data trimmed/selected using points: ')
selPts = list(self.selectedPoints.keys())
selPts.sort()
self.left = selPts[0]
self.right = selPts[1]
else:
# self.showInfo('All points will be selected (because no trim points specified)')
self.showMsg('All data points were selected')
self.left = 0
self.right = self.dataLen - 1
self.smoothSecondary = []
if len(self.yRefStar) > 0:
self.smoothSecondaryButton.setEnabled(True)
self.numSmoothPointsEdit.setEnabled(True)
for i in range(0, self.left):
self.yStatus[i] = EXCLUDED
for i in range(min(self.dataLen, self.right+1), self.dataLen):
self.yStatus[i] = EXCLUDED
for i in range(self.left, min(self.dataLen, self.right+1)):
self.yStatus[i] = INCLUDED
self.selectedPoints = {}
self.reDrawMainPlot()
self.doBlockIntegration.setEnabled(False)
self.mainPlot.autoRange()
def main(csv_file_path=None):
# csv_file_path gets filled in by PyMovie
os.environ['QT_MAC_WANTS_LAYER'] = '1' # This line needed when Mac updated to Big Sur
import traceback
QtGui.QApplication.setStyle('fusion')
app = QtGui.QApplication(sys.argv)
if sys.platform == 'linux':
print(f'os: Linux')
elif sys.platform == 'darwin':
print(f'os: MacOS')
else:
print(f'os: Windows')
app.setStyleSheet("QLabel, QPushButton, QToolButton, QCheckBox, "
"QRadioButton, QLineEdit , QTextEdit {font-size: 8pt}")
# Save the current/proper sys.excepthook object
# sys._excepthook = sys.excepthook
saved_excepthook = sys.excepthook
def exception_hook(exctype, value, tb):
print('')
print('=' * 30)
print(value)
print('=' * 30)
print('')
traceback.print_tb(tb)
# Call the usual exception processor
# sys._excepthook(exctype, value, tb)
saved_excepthook(exctype, value, tb)
# Exit if you prefer...
# sys.exit(1)
sys.excepthook = exception_hook
form = SimplePlot(csv_file_path)
form.show()
app.exec_()
if __name__ == '__main__':
main()
| mit |
cl4rke/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes (digits 0 to 9) from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output:
[..]
Classification performance:
===========================
    Classifier               train-time   test-time   error-rate
    ------------------------------------------------------------
    Nystroem-SVM                105.07s       0.91s       0.0227
    ExtraTrees                   48.20s       1.22s       0.0288
    RandomForest                 47.17s       1.21s       0.0304
    SampledRBF-SVM              140.45s       0.84s       0.0486
    CART                         22.84s       0.16s       0.1214
    dummy                         0.01s       0.02s       0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
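# Example invocation (illustrative only; the options shown are the ones defined by the
# argparse setup above):
#   python bench_mnist.py --classifiers Nystroem-SVM RandomForest --n-jobs 4 --random-seed 0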
| bsd-3-clause |
madjelan/Kaggler | kaggler/online_model/DecisionTree/OnlineClassificationTree.py | 2 | 3156 | from _tree import Tree
from OnlineDecisionTree import *
from utils import *
import numpy as np
import pandas as pd
class ClassificationTree(Tree):
def __init__(
self,
number_of_features,
number_of_functions=10,
min_sample_split=200,
predict_initialize={
'count_dict': {},
}
):
# Constant values
self.number_of_features = number_of_features
self.number_of_functions = number_of_functions
self.min_sample_split = min_sample_split
self.predict_initialize = predict_initialize
self.max_sample = 1000
# Dynamic values
self.left = None
self.right = None
self.randomly_selected_features = []
self._randomly_select()
self.criterion = None
def _calculate_split_score(self, split):
"""
calculate the score of the split:
score = current_error - after_split_error
"""
left_error = gini(split['left'])
right_error = gini(split['right'])
error = gini(self.Y)
# if the split is any good, the score should be greater than 0
total = float(len(self.Y))
score = error - 1 / total * (len(split['left']) * left_error\
+ len(split['right']) * right_error)
return score
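    # Worked example of the split score above (illustrative values; assumes gini() from
    # utils is the usual Gini impurity):
    #   Y = [0, 0, 1, 1] split into left = [0, 0] and right = [1, 1] gives
    #   gini(Y) = 0.5 and gini(left) = gini(right) = 0.0, so
    #   score = 0.5 - (1/4) * (2 * 0.0 + 2 * 0.0) = 0.5, the largest gain possible here.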
def _apply_best_split(self):
best_split, best_split_score = self._find_best_split()
if best_split_score > 0:
self.criterion = lambda x : x[best_split['feature']] \
> best_split['value']
# create the left child
self.left = ClassificationTree(
number_of_features=self.number_of_features,
number_of_functions=self.number_of_functions,
min_sample_split=self.min_sample_split,
predict_initialize={
'count_dict': count_dict(best_split['left']),
}
)
# create the right child
self.right = ClassificationTree(
number_of_features=self.number_of_features,
number_of_functions=self.number_of_functions,
min_sample_split=self.min_sample_split,
predict_initialize={
'count_dict': count_dict(best_split['right']),
}
)
# Collect garbage
self.samples = {}
self.Y = []
def predict(self, x):
"""
Make prediction recursively. Use both the samples inside the current
node and the statistics inherited from parent.
"""
if self._is_leaf():
d1 = self.predict_initialize['count_dict']
d2 = count_dict(self.Y)
for key, value in d1.iteritems():
if key in d2:
d2[key] += value
else:
d2[key] = value
return argmax(d2)
else:
if self.criterion(x):
return self.right.predict(x)
else:
return self.left.predict(x)
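# Usage sketch (hypothetical): only construction and prediction are shown, because the
# online update entry point lives in the base Tree class from _tree, which is not part
# of this file.
#   tree = ClassificationTree(number_of_features=10)
#   label = tree.predict(x)   # x is an indexable feature vector of length 10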
| gpl-3.0 |
PatrickChrist/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
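            # ocur counts how many covariances are tied (within eps) at the maximum C,
            # i.e. the size of the active set after step i.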
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    # check that the positive option is passed through correctly for all
    # estimator classes, all within this one function
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make an adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
PMBio/limix | limix/deprecated/io/data_util.py | 2 | 6701 | # Copyright(c) 2014, The LIMIX developers (Christoph Lippert, Paolo Francesco Casale, Oliver Stegle)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy as sp
import pandas as pd
try:
#see if fastlmm is in the path for the fast C-based parser
import fastlmm.pyplink.wrap_plink_parser as parser
cparser=True
except:
cparser=False
pass
def estCumPos(position,offset=0,chrom_len=None):
'''
compute the cumulative position of each variant given the position and the chromosome
Also return the starting cumulativeposition of each chromosome
Args:
position: pandas DataFrame of basepair positions (key='pos') and chromosome values (key='chrom')
The DataFrame will be updated with field 'pos_cum'
chrom_len: vector with predefined chromosome length
offset: offset between chromosomes for cumulative position (default 0 bp)
Returns:
chrom_pos,position:
chrom_pos: numpy.array of starting cumulative positions for each chromosome
position: augmented position object where cumulative positions are defined
'''
RV = position.copy()
    chromvals = sp.unique(position['chrom'])  # sp.unique is always sorted
    chrom_pos_cum = sp.zeros_like(chromvals)  # starting cumulative position of each chromosome
    pos_cum = sp.zeros_like(position.shape[0])
    if 'pos_cum' not in position:
        RV["pos_cum"] = sp.zeros_like(position['pos'])  # cumulative position of each variant
pos_cum=RV['pos_cum'].values
maxpos_cum=0
for i,mychrom in enumerate(chromvals):
chrom_pos_cum[i] = maxpos_cum
i_chr=position['chrom']==mychrom
if chrom_len is None:
maxpos = position['pos'][i_chr].max()+offset
else:
maxpos = chrom_len[i]+offset
pos_cum[i_chr.values]=maxpos_cum+position.loc[i_chr,'pos']
maxpos_cum+=maxpos
return RV,chrom_pos_cum
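# Small worked example (made-up positions; the offset of 1000 bp is only for illustration):
#   pos = pd.DataFrame({'chrom': [1, 1, 2], 'pos': [100, 200, 50]})
#   pos_aug, chrom_starts = estCumPos(pos, offset=1000)
#   # pos_aug['pos_cum'] becomes [100, 200, 1250] and chrom_starts becomes [0, 1200]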
def imputeMissing(X, center=True, unit=True, betaNotUnitVariance=False, betaA=1.0, betaB=1.0):
'''
fill in missing values in the SNP matrix by the mean value
optionally center the data and unit-variance it
Args:
X: scipy.array of SNP values. If dtype=='int8' the missing values are -9,
otherwise the missing values are scipy.nan
center: Boolean indicator if data should be mean centered
Not supported in C-based parser
unit: Boolean indicator if data should be normalized to have unit variance
Not supported in C-based parser
betaNotUnitVariance: use Beta(betaA,betaB) standardization instead of unit variance
(only with C-based parser) (default: False)
betaA: shape parameter for Beta(betaA,betaB) standardization (only with C-based parser)
betaB: scale parameter for Beta(betaA,betaB) standardization (only with C-based parser)
Returns:
X: scipy.array of standardized SNPs with scipy.float64 values
'''
typeX=X.dtype
if typeX!= sp.int8:
iNanX = X!=X
else:
iNanX = X==-9
if iNanX.any() or betaNotUnitVariance:
if cparser and center and (unit or betaNotUnitVariance):
print("using C-based imputer")
if X.flags["C_CONTIGUOUS"] and typeX== sp.float32:
parser.standardizefloatCAAA(X,betaNotUnitVariance=betaNotUnitVariance,betaA=betaA,betaB=betaB)
X= sp.array(X,dtype= sp.float64)
elif X.flags["C_CONTIGUOUS"] and typeX== sp.float64:
parser.standardizedoubleCAAA(X,betaNotUnitVariance=betaNotUnitVariance,betaA=betaA,betaB=betaB)
elif X.flags["F_CONTIGUOUS"] and typeX== sp.float32:
parser.standardizefloatFAAA(X,betaNotUnitVariance=betaNotUnitVariance,betaA=betaA,betaB=betaB)
X= sp.array(X,dtype= sp.float64)
elif X.flags["F_CONTIGUOUS"] and typeX== sp.float64:
parser.standardizedoubleFAAA(X,betaNotUnitVariance=betaNotUnitVariance,betaA=betaA,betaB=betaB)
else:
X= sp.array(X,order="F",dtype= sp.float64)
X[iNanX]= sp.nan
parser.standardizedoubleFAAA(X,betaNotUnitVariance=betaNotUnitVariance,betaA=betaA,betaB=betaB)
elif betaNotUnitVariance:
raise NotImplementedError("Beta(betaA,betaB) standardization only in C-based parser, but not found")
else:
nObsX = (~iNanX).sum(0)
if typeX!= sp.float64:
X= sp.array(X,dtype= sp.float64)
X[iNanX] = 0.0
sumX = (X).sum(0)
meanX = sumX/nObsX
if center:
X-=meanX
X[iNanX] = 0.0
X_=X
else:
mean= sp.tile(meanX,(X.shape[0],1))
X[iNanX]=mean[iNanX]
X_=X-mean
if unit:
stdX = sp.sqrt((X_*X_).sum(0)/nObsX)
stdX[stdX==0.0]=1.0
X/=stdX
else:
if X.dtype!= sp.float64:
X= sp.array(X,dtype= sp.float64)
if center:
X-= X.mean(axis=0)
if unit:
stdX= X.std(axis=0)
stdX[stdX==0.0]=1.0
X/=stdX
return X
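# Illustrative usage sketch (not part of the original module): -9 marks missing
# int8 genotype calls as described in the docstring; the 3x2 matrix is invented.
def _example_imputeMissing():
    G = sp.array([[0, 2], [-9, 1], [2, -9]], dtype=sp.int8)
    # mean-impute the missing entries, then mean-center and unit-variance scale
    return imputeMissing(G, center=True, unit=True)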
def merge_indices(indices,header=None,join="inner"):
"""
returns a merged index
Args:
indices: list of indices (e.g. individual IDs)
header: list with name of each element of indices (e.g. ["pheno","geno","covars"])
        join: type of join to perform (standard is "inner")
Returns:
pandas DataFrame with merged indices
"""
indexpd = []
for i, index in enumerate(indices):
if header is None:
header_=[i]
else:
header_=[header[i]]
indexpd.append(pd.DataFrame(data= sp.arange(len(index)),index=index,columns=header_) )
ret = pd.concat(objs=indexpd, axis=1, join=join)
return ret
if __name__ == "__main__":
lists=[["a","b"],["a","c","b"],["d","a","b"]]
header = [["bl"],["n"],["s"]]
merge=merge_indices(lists, header=None, join="outer")
| apache-2.0 |
smartscheduling/scikit-learn-categorical-tree | sklearn/linear_model/stochastic_gradient.py | 8 | 50342 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'auto':
raise ValueError("class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight('auto', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.")
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
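# Illustrative usage sketch (not part of the library): out-of-core training via
# partial_fit as documented above. The toy data and the two-row mini-batches are
# invented; `classes` must enumerate every label on the first call.
def _example_partial_fit_usage():
    X = np.array([[-1., -1.], [1., 1.], [-2., -1.], [2., 1.]])
    y = np.array([0, 1, 0, 1])
    clf = SGDClassifier(loss="hinge", random_state=0)
    for start in (0, 2):  # stream the data in two mini-batches
        clf.partial_fit(X[start:start + 2], y[start:start + 2],
                        classes=np.unique(y))
    return clf.predict([[0.5, 0.5]])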
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
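# Illustrative usage sketch (not part of the library): probability estimates are
# only exposed for loss='log' and loss='modified_huber' (see _check_proba above).
# The training data is a toy example.
def _example_predict_proba_usage():
    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([0, 0, 1, 1])
    clf = SGDClassifier(loss="log", random_state=0).fit(X, y)
    return clf.predict_proba([[0.8, 1.0]])  # shape (1, 2), columns ordered as clf.classes_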
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = y.astype(np.float64)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
`average_coef_` : array, shape (n_features,)
Averaged weights assigned to the features.
`average_intercept_` : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
alvarofierroclavero/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rat
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
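# Example invocation (flags match the argparse options defined below; the chosen
# classifiers are just one possible subset of the ESTIMATORS keys above):
#   python bench_mnist.py --classifiers RandomForest Nystroem-SVM --n-jobs 4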
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
yandex/yandex-tank | yandextank/plugins/Bfg/reader.py | 2 | 2623 | import pandas as pd
import time
import itertools as itt
from queue import Empty
from threading import Lock
import threading as th
import logging
logger = logging.getLogger(__name__)
def records_to_df(records):
records = pd.DataFrame.from_records(records)
records['receive_ts'] = records['send_ts'] + records['interval_real'] / 1e6
records['receive_sec'] = records.receive_ts.astype(int)
# TODO: consider configuration for the following:
records['tag'] = records.tag.str.rsplit('#', 1, expand=True)[0]
records.set_index(['receive_sec'], inplace=True)
return records
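# Illustrative usage sketch (not part of the original module): the field names
# ('send_ts', 'interval_real', 'tag') are the ones accessed above; the numeric
# values and the 'case1#42' tag are invented.
def _example_records_to_df():
    sample = [{'send_ts': 1500000000.0, 'interval_real': 25000, 'tag': 'case1#42'}]
    df = records_to_df(sample)
    # df is indexed by 'receive_sec' and the tag is trimmed to 'case1'
    return df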
def _expand_steps(steps):
return list(itt.chain(
* [[rps] * int(duration) for rps, duration in steps]))
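# Illustrative sketch (not part of the original module): every (rps, duration)
# pair expands to `duration` repeats of `rps`, i.e. one entry per second.
def _example_expand_steps():
    return _expand_steps([(1, 2), (5, 3)])  # -> [1, 1, 5, 5, 5]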
class BfgReader(object):
def __init__(self, results, closed):
self.buffer = ""
self.stat_buffer = ""
self.results = results
self.closed = closed
self.records = []
self.lock = Lock()
self.thread = th.Thread(target=self._cacher)
self.thread.start()
def _cacher(self):
while True:
try:
self.records.append(
self.results.get(block=False))
except Empty:
if not self.closed.is_set():
time.sleep(0.1)
else:
break
def __next__(self):
if self.closed.is_set():
self.thread.join()
raise StopIteration
with self.lock:
records = self.records
self.records = []
if records:
return records_to_df(records)
return None
def __iter__(self):
return self
class BfgStatsReader(object):
def __init__(self, instance_counter, steps):
self.closed = False
self.last_ts = 0
self.steps = _expand_steps(steps)
self.instance_counter = instance_counter
self.start_time = int(time.time())
def __iter__(self):
while not self.closed:
cur_ts = int(time.time())
if cur_ts > self.last_ts:
offset = cur_ts - self.start_time
reqps = 0
if offset >= 0 and offset < len(self.steps):
reqps = self.steps[offset]
yield [{
'ts': cur_ts,
'metrics': {
'instances': self.instance_counter.value,
'reqps': reqps
}
}]
self.last_ts = cur_ts
else:
yield []
def close(self):
self.closed = True
| lgpl-2.1 |