repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ThomasMiconi/htmresearch | projects/sequence_prediction/discrete_sequences/plotSequenceLengthExperiment.py | 12 | 6654 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot sequence length experiment results
"""
import os
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
from plot import plotAccuracy
from plot import movingAverage
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
def computeAccuracyEnding(predictions, truths, iterations,
resets=None, randoms=None, num=None,
sequenceCounter=None):
"""
Compute accuracy on the sequence ending
"""
accuracy = []
numIteration = []
numSequences = []
for i in xrange(len(predictions) - 1):
if num is not None and i > num:
continue
if truths[i] is None:
continue
# identify the end of sequence
    if resets is not None or randoms is not None:
      # guard against the case where only one of resets/randoms is provided
      if not ((resets is not None and resets[i+1]) or
              (randoms is not None and randoms[i+1])):
        continue
correct = truths[i] is None or truths[i] in predictions[i]
accuracy.append(correct)
numSequences.append(sequenceCounter[i])
numIteration.append(iterations[i])
return (accuracy, numIteration, numSequences)
def computeAccuracy(predictions, truths, iterations,
resets=None, randoms=None, num=None,
sequenceCounter=None):
"""
Compute accuracy on the whole sequence
"""
accuracy = []
numIteration = []
numSequences = []
for i in xrange(len(predictions) - 1):
if num is not None and i > num:
continue
if truths[i] is None:
continue
correct = truths[i] is None or truths[i] in predictions[i]
accuracy.append(correct)
numSequences.append(sequenceCounter[i])
numIteration.append(iterations[i])
return (accuracy, numIteration, numSequences)
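# Example (sketch, hypothetical values): both accuracy helpers consume the
# parallel lists produced by plot.readExperiment; the literals below only
# illustrate the expected shapes, they are not real experiment output.
#   predictions = [{'B'}, {'C'}, {'A'}]   # predicted symbol set per step
#   truths = ['B', 'C', None]             # actual next symbol (None = unknown)
#   iterations = [0, 1, 2]
#   resets = [False, False, True]         # True marks the start of a new sequence
#   randoms = [False, False, False]
#   sequenceCounter = [0, 0, 0]
#   (acc, numIter, numSeq) = computeAccuracy(
#     predictions, truths, iterations,
#     resets=resets, randoms=randoms, sequenceCounter=sequenceCounter)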
if __name__ == '__main__':
lengths = [10, 20, 40, 60, 80, 100]
tmResults = os.path.join("tm/results",
"high-order-variable-length")
try:
# Load raw experiment results
# You have to run the experiments in ./tm
# python tm_suite.py --experiment="high-order-variable-length" -d
expResults = {}
for length in lengths:
experiment = os.path.join(tmResults,
"sequence_length"+"{:.1f}".format(length),
"0.log")
print "Load Experiment", experiment
data = readExperiment(experiment)
(accuracy, numIteration, numSequences) = computeAccuracyEnding(
data['predictions'],
data['truths'],
data['iterations'],
resets=data['resets'],
randoms=data['randoms'],
sequenceCounter=data['sequenceCounter'])
(accuracyAll, numIterationAll, numSequencesAll) = computeAccuracy(
data['predictions'],
data['truths'],
data['iterations'],
resets=data['resets'],
randoms=data['randoms'],
sequenceCounter=data['sequenceCounter'])
expResult = {"length": length,
"accuracy": accuracy,
"numIteration": numIteration,
"numSequences": numSequences,
"accuracyAll": accuracyAll,
"numIterationAll": numIterationAll,
"numSequencesAll": numSequencesAll}
expResults[length] = expResult
output = open(os.path.join(tmResults,
'SequenceLengthExperiment.pkl'), 'wb')
pickle.dump(expResults, output, -1)
output.close()
except:
print "Cannot find raw experiment results"
print "Plot using saved processed experiment results"
input = open(os.path.join(tmResults,
'SequenceLengthExperiment.pkl'), 'rb')
expResults = pickle.load(input)
# load processed experiment results and plot them
numSequenceRequired = []
numIterationRequired = []
lengths = np.sort(expResults.keys())
plt.close('all')
plt.figure(1)
for length in lengths:
expResult = expResults[length]
accuracy = expResult["accuracy"]
numIteration = expResult["numIteration"]
numSequences = expResult["numSequences"]
movingData = movingAverage(accuracy, min(len(accuracy), 100))
numSequenceRequired.append(
numSequences[np.where(np.array(movingData) >= 0.999)[0][1]])
numIterationRequired.append(
numIteration[np.where(np.array(movingData) >= 0.999)[0][1]])
plt.figure(1)
plotAccuracy((accuracy, numSequences),
window=100,
type=type,
label='NoiseExperiment',
hideTraining=True,
lineSize=1.0)
plt.xlabel('# of sequences seen')
plt.figure(2)
plotAccuracy((expResult["accuracyAll"], expResult["numSequencesAll"]),
window=1000,
type=type,
label='NoiseExperiment',
hideTraining=True,
lineSize=1.0)
plt.xlabel('# of sequences seen')
for fig in [1, 2]:
plt.figure(fig)
plt.ylabel('Prediction accuracy')
plt.ylim([0, 1.05])
plt.legend(lengths, loc=4)
plt.figure(1)
plt.savefig('./result/sequence_length_experiment_performance.pdf')
plt.figure(2)
plt.xlim([0, 100])
plt.savefig('./result/sequence_length_experiment_performance_overall.pdf')
plt.figure()
plt.plot(lengths, numSequenceRequired, '-*')
plt.xlabel(' Sequence Order ')
plt.ylabel(' # Sequences seen to achieve perfect prediction')
plt.savefig('./result/requred_sequence_number_vs_sequence_order.pdf')
plt.figure()
plt.plot(lengths, numIterationRequired, '-*')
plt.xlabel(' Sequence Order ')
plt.ylabel(' # Elements seen to achieve perfect prediction')
plt.savefig('./result/requred_elements_number_vs_sequence_order.pdf')
| agpl-3.0 |
kwecht/BABS-GUI | code/BabsClasses.py | 1 | 6969 | ########################################################################
#
# Kevin Wecht 4 November 2014
#
# Bay Area Bicycle Share (BABS) Open Data Challenge
#
########################################################################
#
# This file contains user defined classes to assist the
# analysis of BABS data.
#
# OUTLINE
# PlotOptions - holds information from widgets to determine
# what to show in the plot window.
# GridParams - holds information about the grid layout of the GUI
#
########################################################################
# Import modules required by these functions
import pandas as pd
import pdb
########################################################################
# Set up grid on which to place widgets in the QtGui Window
class GridParams:
"""Class to hold grid parameters for the QtGui Window."""
# Set default values for grid size
def __init__(self):
# Number of cells in each direction
self.nrow = 48
self.ncol = 72
# Spacing between cells
self.spacing = 2
# Area occupied by matplotlib plot.
        # Leave 8 rows and 12 cols around the main plot for the option widgets
self.plotnrow = self.nrow - 8
self.plotncol = self.ncol - 12
self.plotrow0 = 0 # Place
self.plotcol0 = 0
self.plotrow1 = self.plotnrow + self.plotrow0 - 1 # -1 to be inclusive range
self.plotcol1 = self.plotncol + self.plotcol0 - 1
# Area occupied by plot options
self.optnrow = self.nrow - self.plotnrow
self.optncol = self.ncol - self.plotncol
self.optrow0 = 0
self.optcol0 = self.plotcol1 + 1
self.optrow1 = self.optrow0 + self.optnrow - 1
self.optcol1 = self.optcol0 + self.optncol - 1
# Number of filter options to squeeze into each column
self.nfiltercol = 3
# Row offsets of each sub-section in the options section
self.maintype_row0 = 0
self.maingroup_row0 = 1
self.timegroup_row0 = 3
self.bingroup_row0 = 3
self.divisiongroup_row0 = 7
self.overgroup_row0 = 12
self.filtergroup_row0 = 17
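# Example (sketch): GridParams only stores layout numbers, so it can be
# inspected without constructing any Qt widgets; the values follow from the
# defaults above.
#   grid = GridParams()
#   grid.plotrow1, grid.plotcol1   # (39, 59): plot spans rows 0-39, cols 0-59
#   grid.optcol0                   # 60: options panel starts right of the plot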
# Define class to hold parameters that determine what to plot in the main widget.
class PlotOptions:
"""Class to hold info determining what to plot in the main window."""
# Initialize some options to plot number of rides per week
def __init__(self):
# Integer indicating the type of information to show
# {0 'timeseries'|1 'histogram'}
self.typeid = 0
# Integer indicating the value to plot
# {0 'nrides'|1 'duration'|2 'distance'}
self.barid = 0
# Integer indicating the way to bin the data
# {0 'Time (other)'|1 'Number of Rides'|2 'Day of Week'|3 'Hour of Day'|4 'Region'}
self.binid = 0
# String that indicates which variable along which to
# divide the main variable plotted
# For example, we can divide each bar in the plot into two segments:
# annual customers and recent (casual) customers.
# {options here}
self.division = ''
self.division_types = [] # {['Subscriber','Customer'], ['Monday','Tuesday',...], ...}
# Time over which to average data.
# Used when plotting timeseries and
# histogram (optional) for daily or weekly average values (ex. daily mean rides)
# Must be a whole number of days
self.dT = '7D' # Default weekly average. Monthly = 30, daily = 1
# Plot dimensions. If empty, use default dimensions in the data.
self.xlim = []
self.ylim = []
# Set additional values to plot over the bars.
# {weather('temp', 'precip', 'wind') | 'nrides' | 'duration'}
self.overtype = []
# Filter options
# {date range, time of day, day of week, region, weather, station ID}
self.filters = {}
# Populate plot options with the selections in the GUI window
# named MainWindow.
# This overides the default options selected above
#self.populate(MainWindow)
# Method to fill options from currently selected widgets in the gui
def populate(self,MainWindow):
"""Populate plot options using the selections in the GUI window.
INPUT
MainWindow - Instance of a class that defines our main GUI window"""
# 1. From drop down list indicating plot type
self.typeid = MainWindow.mainType.currentIndex()
# 2. From drop down list indicating which variable to plot in bars
self.barid = MainWindow.mainGroup.currentIndex()
# 3. From drop down list indicating how to bin the data
self.binid = MainWindow.binGroup.currentIndex()
# 3. From text entry telling by what time step to bin data
number = int(str(MainWindow.timeText.text()))
unit = str(MainWindow.timeGroup.currentText())[0]
self.dT = str(number)+unit
# 2. From radio buttons indicating by which variable we should
# divide the bars.
self.division = str(MainWindow.divisionGroup.checkedButton().objectName())
if self.division=='Customer Type':
self.division_types=['Subscriber','Customer']
elif self.division=='Hour of Day':
self.division_types = [str(val) for val in range(24)]
elif self.division=='Day of Week':
self.division_types = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
elif self.division=='Region':
self.division_types = ['San Francisco','San Jose','Mountain View','Redwood City','Palo Alto']
# 3. From check buttons indicating what to overplot
self.overtype = []
for button in MainWindow.overGroup.buttons():
if button.isChecked():
self.overtype.append(str(button.text()))
# 4. From filter check boxes indicating what to trim from data
# Store filter information in dictionary in which the keys are
# 'Customer Type', 'Region', 'Day of Week', 'Hour of Day'
self.filters = {}
filtergroups = [MainWindow.filterGroup_customer,
MainWindow.filterGroup_region,
MainWindow.filterGroup_dayofweek,
MainWindow.filterGroup_hourofday]
for group in filtergroups:
groupname = str(group.objectName())
unchecked = []
for button in group.buttons():
if not button.isChecked():
if groupname=="Day of Week":
unchecked.append(str(group.id(button)))
else:
unchecked.append(str(button.text()))
if unchecked!=[]:
self.filters[groupname] = unchecked
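# Example (sketch): outside the GUI, PlotOptions works as a plain settings
# container; populate() is only needed when a MainWindow with the widgets
# referenced above exists.
#   opts = PlotOptions()                   # defaults: timeseries of nrides, '7D' bins
#   opts.dT = '1D'                         # switch to daily averages
#   opts.filters['Region'] = ['San Jose']  # hypothetical: values listed here are excluded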
| mit |
larsmans/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 31 | 3340 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
"""Tests the FastMCD algorithm implementation
"""
### Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
### Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
### Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
### 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
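# Example (sketch, mirroring the helper above): typical standalone usage of the
# estimator on a contaminated data set, outside the test harness.
#   rng = np.random.RandomState(0)
#   X_demo = rng.randn(100, 5)
#   mcd = MinCovDet(random_state=rng).fit(X_demo)
#   robust_location, robust_cov = mcd.location_, mcd.covariance_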
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
print(clf.threshold)
assert_raises(Exception, clf.predict, X)
assert_raises(Exception, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
chrishavlin/nyc_taxi_viz | src/taxi_plotmod.py | 1 | 12395 | """
taxi_plotmod.py
module for plotting and processing taxi data
Copyright (C) 2016 Chris Havlin, <https://chrishavlin.wordpress.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The database is NOT distributed with the code here.
Data source:
NYC Taxi & Limousine Commision, TLC Trip Record Data
<http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml>
"""
"""--------------
Import libraries:
-----------------"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import datetime as dt
"""---------
Classes
------------"""
class binned_variable(object):
""" class for variables binned by time """
def __init__(self,name,time_b,varmin=0,varmax=1):
self.varname = name # variable name
self.time_be = time_b # time bins, edge values
self.time_bc = (time_b[0:time_b.size-1]+time_b[1:time_b.size])/2.0 # time bin center values
self.varmin=varmin
self.varmax=varmax
def bin_the_values(self,VarBig,Var_list):
# initialize the output
self.mean=np.zeros((self.time_be.size-1,1))
self.med=np.zeros((self.time_be.size-1,1))
self.firstquart=np.zeros((self.time_be.size-1,1))
self.thirdquart=np.zeros((self.time_be.size-1,1))
self.std=np.zeros((self.time_be.size-1,1))
self.N=np.zeros((self.time_be.size-1,1))
# extract variable of interest from full dataset
var0 = VarBig[:,Var_list.index(self.varname)]
time0 = VarBig[:,0]
# calculate standard deviation,mean,median and number of obs in each bin
for i_bin in range(self.time_bc.size):
var2bin = var0[time0>self.time_be[i_bin]]
time = time0[time0>self.time_be[i_bin]]
var2bin = var2bin[time<self.time_be[i_bin+1]]
var2bin = var2bin[var2bin>self.varmin]
var2bin = var2bin[var2bin<self.varmax]
self.mean[i_bin]=var2bin.mean()
self.med[i_bin]=np.median(var2bin)
self.std[i_bin]=var2bin.std()
self.N[i_bin]=var2bin.size
self.thirdquart[i_bin]=np.median(var2bin[np.where(var2bin>self.med[i_bin])])
self.firstquart[i_bin]=np.median(var2bin[np.where(var2bin<self.med[i_bin])])
def bin_hist(self,VarBig,Var_list,bin_edge1,bin_edge2):
# extract variable of interest from full dataset
var0 = VarBig[:,Var_list.index(self.varname)]
time0 = VarBig[:,Var_list.index('pickup_time_hr')]
bin_var=var0[time0>bin_edge1]
time=time0[time0>bin_edge1]
bin_var=bin_var[time<bin_edge2]
bin_var = bin_var[bin_var>self.varmin]
bin_var = bin_var[bin_var<self.varmax]
self.hist_id=bin_var
self.hist_id_N=bin_var.size
self.hist_id_mean=bin_var.mean()
self.hist_id_med=np.median(bin_var)
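# Example (sketch): `VarBig` and `Var_list` stand for the module-wide data
# array and column list used throughout this file; the column name
# 'fare_amount' is only assumed here for illustration.
#   time_bins = np.linspace(0., 24., 25)              # hourly bin edges
#   fare = binned_variable('fare_amount', time_bins, varmin=0., varmax=100.)
#   fare.bin_the_values(VarBig, Var_list)             # fills .mean, .med, .std, .N
#   fare.bin_hist(VarBig, Var_list, 7., 10.)          # histogram for 7-10 am pickups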
"""---------
Functions
------------"""
def map_proc(VarBig,Var_list,bin_varname,bin_min,bin_max,Pickup,nx,ny,
bottom_left=[40.697089, -74.027397],top_left=[40.823067, -74.027397],
bottom_right=[40.697089, -73.914240],top_right=[40.823067,-73.914240]):
"""
Input Variables:
VarBig
The 2-D array of all variables
Var_list
The list identifying the columns of VarBig
bin_varname
The name of the variable to bin
bin_min, bin_max
The min/max value of the variable to bin
Pickup
Boolean true/false. If true, the variable of interest (bin_varname)
will be binned by the pick up location. If false, the drop off location.
nx,ny
Number of bin-nodes in x and y (i.e., number of bins in x is nx-1)
bottom_left, top_left, etc.
Grid dimensions: lat/lon points defining the corners of a rectangle
Some useful lat/lon points
Manhattan:
bottom_left=[40.697089, -74.027397],top_left=[40.823067, -74.027397],
bottom_right=[40.697089, -73.914240],top_right=[40.823067,-73.914240]
Wall Street:
bottom_left=[40.701035, -74.022382],top_left=[40.715075, -74.022382],
bottom_right=[40.701035, -74.000207],top_right=[40.715075,-74.000207]
Columbus Circle:
bottom_left=[40.765505, -73.985891],top_left=[40.770369, -73.985891],
bottom_right=[40.765505, -73.978744],top_right=[40.770369,-73.978744]
"""
# build cell node grid
x=np.linspace(bottom_left[1],bottom_right[1],nx) # defined on cell corners
y=np.linspace(bottom_left[0],top_left[0],ny) # defined on cell corners
# cell center grid
xc=(x[0:x.size-1]+x[1:x.size])/2
yc=(y[0:y.size-1]+y[1:y.size])/2
# create an empty TaxiCount array
TotCount=np.zeros((ny-1,nx-1)) # defined on cell centers
MeanClr=np.zeros((ny-1,nx-1)) # defined on cell centers
# pull out lat/lon and variable lists depending on pickup or dropoff
if Pickup:
BigLat=VarBig[:,Var_list.index('pickup_lat')]
BigLon=VarBig[:,Var_list.index('pickup_lon')]
else:
BigLat=VarBig[:,Var_list.index('drop_lat')]
BigLon=VarBig[:,Var_list.index('drop_lon')]
ColorVar= VarBig[:,Var_list.index(bin_varname)]
indc=1
# count, but loop over variable
nV = ColorVar.size
dx = abs(xc[1]-xc[0])
dy = abs(yc[1]-yc[0])
prevprog=0
print 'Binning',bin_varname,', with Nvar=',nV
for iV in range(nV-1):
prog= round(float(iV) / float(nV-1) * 100)
if prog % 5 == 0 and prog != prevprog:
print ' ',int(prog),'% complete ...'
prevprog=prog
if ColorVar[iV]>bin_min and ColorVar[iV]<bin_max:
xi=BigLon[iV]
yi=BigLat[iV]
i_y=np.where(abs(yi-yc)<dy/2.0)
i_x=np.where(abs(xi-xc)<dx/2.0)
if i_y[0].size==1 and i_x[0].size==1:
TotCount[i_y[0],i_x[0]]=TotCount[i_y[0],i_x[0]]+1
MeanClr[i_y[0],i_x[0]]=MeanClr[i_y[0],i_x[0]]+ColorVar[iV]
non0=np.where(TotCount>0)
if non0[0].size>0:
MeanClr[non0]=np.divide(MeanClr[non0],TotCount[non0])
print 'Completed binning of ',bin_varname
return TotCount,MeanClr,x,y
def plt_map(Zvar,minZ,maxZ,x,y,LogPlot=False,ShowFig=True,SaveFig=False,savename=' ',
dim_x_in=4,dim_y_in=6,figdpi=180):
"""
maps a spatially binned variable using pcolormesh
input:
Zvar 2D array of binned data
minZ min value for color scale
maxZ max value for color scale
x x dimension, defining nodes of Zvar bins
y y dimension, defining nodes of Zvar bins
LogPlot will use log10(Zvar) if True
ShowFig display the figure after plotting?
SaveFig save the figure?
savename if saving, the filename
dim_x_in x dimension of figure
dim_y_in y dimension of figure
figdpi if saving, the dpi resolution to use
"""
if LogPlot==True:
Zvar = np.log10(Zvar)
minZ = np.log10(minZ)
maxZ = np.log10(maxZ)
# mask the variable
Z = Zvar
# cell edges
[Xgrid,Ygrid]=np.meshgrid(x,y)
# get cell centers and make the meshgrid
xc = (x[0:x.size-1]+x[1:x.size])/2
yc = (y[0:y.size-1]+y[1:y.size])/2
[Xgridc,Ygridc]=np.meshgrid(xc,yc)
# now plot it!
fig=plt.figure()
fig.set_size_inches(dim_x_in,dim_y_in,forward=True)
pcol=plt.pcolormesh(Xgrid,Ygrid,Z,cmap=cm.hot,linewidth=0)
pcol.set_edgecolor('face')
plt.axis('off')
plt.clim(minZ,maxZ)
plt.colorbar()
if SaveFig:
fig.savefig(savename+'.png',bbox_inches='tight',format='png', dpi=figdpi)
if ShowFig:
print 'close figure to continue...'
plt.show()
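# Example (sketch): a typical pipeline bins a variable on the lat/lon grid with
# map_proc and then renders it with plt_map; 'speed_mph' is assumed to be one
# of the columns listed in Var_list.
#   counts, mean_speed, x, y = map_proc(VarBig, Var_list, 'speed_mph',
#                                       bin_min=0., bin_max=100., Pickup=True,
#                                       nx=200, ny=200)
#   plt_map(counts, 1., counts.max(), x, y, LogPlot=True,
#           ShowFig=False, SaveFig=True, savename='pickup_density')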
def find_N_unique_vs_t(Var,Var_list,times):
'''
finds number of cabs hired at given time points.
hired cabs at a given time, N(t), defined as:
N(t) = Npickups(t) + Nactive(t)
where Npickups is the number of pickups at t, exactly, and
Nactive is the number of cabs with a pickup time before t and a drop off time after t.
input:
Var The 2-D array of all variables
Var_list The list identifying the columns of Var
times The time points to calculate N
output:
N_unique Number of cabs at time points
Speed Average speed at each time point
'''
N_t=len(times)
pick=Var[:,Var_list.index('pickup_time_hr')]
elap=Var[:,Var_list.index('elapsed_time_min')]/60.0
drop=pick[:]+elap[:]
speed=Var[:,Var_list.index('speed_mph')]
# copy values with pickup within certain time, offset it.
id_move=np.where(pick>=21.0)
pick=np.append(pick,pick[id_move[0]]-24.0)
drop=np.append(drop,drop[id_move[0]]-24.0)
speed=np.append(speed,speed[id_move[0]])
# initialize the taxi count and speed arrays
N_unique = np.zeros(times.shape)
Speed = np.zeros(times.shape)
for it in range(0,N_t,1):
current_time=times[it]
id_pickup=np.where((drop >= current_time) & (pick<=current_time))
if len(id_pickup[0])>0:
N_unique[it]=len(id_pickup[0])
Speed[it]=speed[id_pickup[0]].mean()
return N_unique,Speed,times
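# Example (sketch): evaluating the hired-cab count N(t) on a half-hour grid,
# again assuming the module-wide VarBig / Var_list objects.
#   t_grid = np.arange(0., 24., 0.5)
#   N_hired, mean_speed, t_grid = find_N_unique_vs_t(VarBig, Var_list, t_grid)
#   plt.plot(t_grid, N_hired)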
def plt_two_d_histogram(bin_varname,VarMin,VarMax,time_b,VarBig,Var_list):
# median vs time
bin_inst=binned_variable(bin_varname,time_b,VarMin,VarMax)
bin_inst.bin_the_values(VarBig,Var_list)
# cull data by by min,max for 2d histogram plot
max_unbin=bin_inst.varmax
min_unbin=bin_inst.varmin
unbinned_value = VarBig[:,Var_list.index(bin_varname)]
time = VarBig[:,Var_list.index('pickup_time_hr')]
time = time[unbinned_value>=min_unbin]
unbinned_value = unbinned_value[unbinned_value>=min_unbin]
time = time[unbinned_value<=max_unbin]
unbinned_value = unbinned_value[unbinned_value<=max_unbin]
# plot 2d histogram
clr = (0.3,0.3,0.3)
plt.subplot(1,3,1)
plt.hist2d(time,unbinned_value,(48,30),cmap=cm.hot)
plt.colorbar()
plt.plot(bin_inst.time_bc,bin_inst.mean,color=(0.6,0.6,0.6),linewidth=3)
plt.plot(bin_inst.time_bc,bin_inst.med,color=clr,linewidth=3)
plt.plot(bin_inst.time_bc,bin_inst.firstquart,color=clr,linewidth=3)
plt.plot(bin_inst.time_bc,bin_inst.thirdquart,color=clr,linewidth=3)
plt.xlim([0,24])
plt.ylim([min_unbin,max_unbin])
plt.ylim([min_unbin,50])
plt.ylabel(bin_varname)
plt.xlabel('time of day [24-hr]')
# some selected 1-D histograms
plt.subplot(1,3,2)
bin_inst.bin_hist(VarBig,Var_list,5,6)
LAB='5-6,'+str(bin_inst.hist_id_N) + ',' + str(round(bin_inst.hist_id_med,1))
LAB = LAB + ',' + str(round(bin_inst.hist_id_mean,1))
plt.hist(bin_inst.hist_id,bins=50,histtype='step',normed=True,label=LAB)
bin_inst.bin_hist(VarBig,Var_list,9,18)
LAB='9-18,' +str(bin_inst.hist_id_N) + ',' + str(round(bin_inst.hist_id_med,1))
LAB = LAB + ',' + str(round(bin_inst.hist_id_mean,1))
plt.hist(bin_inst.hist_id,bins=50,histtype='step',normed=True,label=LAB)
bin_inst.bin_hist(VarBig,Var_list,20,24)
LAB='20-24,'+str(bin_inst.hist_id_N) + ',' + str(round(bin_inst.hist_id_med,1))
LAB = LAB + ',' + str(round(bin_inst.hist_id_mean,1))
plt.hist(bin_inst.hist_id,bins=50,histtype='step',normed=True,label=LAB)
plt.ylabel('bin N / tot N')
plt.xlabel(bin_varname)
plt.ylim([0,0.15])
plt.legend()
# interquartile range (midspread) plot
plt.subplot(1,3,3)
plt.plot(bin_inst.time_bc,abs(bin_inst.firstquart-bin_inst.thirdquart),color=clr,linewidth=3)
plt.xlabel('time of day [24-hr]')
plt.ylabel('spread of ' + bin_varname + ' (first - third quartiles)')
plt.xlim([0,24])
plt.show()
| gpl-3.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/examples/time_frequency/plot_source_label_time_frequency.py | 19 | 3767 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and phase lock
power, phase_lock = source_induced_power(
this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
phase_lock = np.mean(phase_lock, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(phase_lock,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
mangate/ConvNetSent | process_data_subj.py | 1 | 3164 | __author__ = 'mangate'
import numpy as np
import cPickle
from collections import defaultdict
import re
import pandas as pd
import os.path
from process_data_common import load_bin_vec, add_unknown_words, get_W
def build_data_cv(data_folder, cv=10, clean_string=True):
"""
Loads data and split into 10 folds.
"""
revs = []
obj_file = data_folder[0]
subj_file = data_folder[1]
vocab = defaultdict(float)
files = [obj_file, subj_file]
for i in range(len(files)):
with open(files[i], "rb") as f:
for line in f:
rev = []
rev.append(line.strip())
if clean_string:
orig_rev = clean_str(" ".join(rev))
else:
orig_rev = " ".join(rev).lower()
words = set(orig_rev.split())
for word in words:
vocab[word] += 1
datum = {"y":i,
"text": orig_rev,
"num_words": len(orig_rev.split()),
"split": np.random.randint(0,cv)}
revs.append(datum)
return revs, vocab
def clean_str(string, TREC=False):
"""
Tokenization/string cleaning for all datasets except for SST.
Every dataset is lower cased except for TREC
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip() if TREC else string.strip().lower()
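# Example (sketch): clean_str lower-cases the text and splits punctuation and
# contractions into separate tokens, e.g.
#   clean_str("Isn't this GREAT, really!")
#   # -> "is n't this great , really !"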
def process_data(file_name):
if os.path.isfile(file_name):
print "file {} already exists".format(file_name)
return
print "creating dataset..."
# load data
print "loading data...",
data_folder = ["data/subj/plot.tok.gt9.5000", "data/subj/quote.tok.gt9.5000"]
revs, vocab = build_data_cv(data_folder, cv=10, clean_string=True)
max_l = np.max(pd.DataFrame(revs)["num_words"])
print "data loaded!"
print "number of sentences: " + str(len(revs))
print "vocab size: " + str(len(vocab))
print "max sentence length: " + str(max_l)
# load word2vec
print "loading word2vec vectors...",
w2v_file = 'data/GoogleNews-vectors-negative300.bin'
w2v = load_bin_vec(w2v_file, vocab)
print "num words already in word2vec: " + str(len(w2v))
print "word2vec loaded!"
    # Adding random vectors for all unknown words
add_unknown_words(w2v, vocab)
W, word_idx_map = get_W(w2v)
rand_vecs = {}
add_unknown_words(rand_vecs, vocab)
W2, _ = get_W(rand_vecs)
# dump to pickle file
cPickle.dump([revs, W, W2, word_idx_map, vocab, max_l], open(file_name, "wb"))
print "dataset created!" | mit |
nlpub/russe-evaluation | russe/evaluation/evaluate_test.py | 1 | 3013 | #!/usr/bin/env python
from os.path import join, splitext, basename, dirname
from pandas import read_csv, Series
from collections import defaultdict
import argparse
from sys import stderr
from evaluate import hj_evaluation, semantic_relation_classification_evaluation
TEST = True # if True, use *-test.csv for evaluation, else use *-train.csv
DATA_DIR = "." # the directory where *-test.csv and *-train.csv live
HJ_TEST = join(DATA_DIR, "hj-test.csv" if TEST else "hj-train.csv")
RT_TEST = join(DATA_DIR, "rt-test.csv" if TEST else "rt-train.csv")
AE_TEST = join(DATA_DIR, "ae-test.csv" if TEST else "ae-train.csv")
AE2_TEST = join(DATA_DIR, "ae2-test.csv" if TEST else "ae2-train.csv")
def get_test(test_fpath):
test_df = read_csv(test_fpath, ',', encoding='utf8')
test = defaultdict(dict)
for i, r in test_df.iterrows():
test[r["word1"]][r["word2"]] = r["sim"]
return test
def create_usim(df_fpath, test_fpath):
print "golden standard:", df_fpath
print "test.csv:", test_fpath
test = get_test(test_fpath)
df = read_csv(df_fpath, ',', encoding='utf8')
not_found_num = 0
usim_lst = []
for i, r in df.iterrows():
w1 = r["word1"]
w2 = r["word2"]
found = w1 in test and w2 in test[w1]
usim = test[w1][w2] if found else 0.0
if not found:
#print w1, w2
not_found_num += 1
usim_lst.append(usim)
df["usim"] = Series(usim_lst)
print "not found", not_found_num, "of", i
print "used", i - not_found_num
usim_fpath = join(dirname(test_fpath), splitext(basename(df_fpath))[0] + "-usim.csv")
df.to_csv(usim_fpath, encoding="utf-8", index=False, sep=",")
print "golden standard + test.csv:", usim_fpath, "\n"
return usim_fpath
def evaluation(args):
print "test.csv:", args.test_fpath
hj_fpath = create_usim(HJ_TEST, args.test_fpath)
rt_fpath = create_usim(RT_TEST, args.test_fpath)
ae_fpath = create_usim(AE_TEST, args.test_fpath)
ae2_fpath = create_usim(AE2_TEST, args.test_fpath)
r = {}
r["hj"] = hj_evaluation(hj_fpath)
r["aehj"] = hj_evaluation(ae_fpath)
r["ae2hj"] = hj_evaluation(ae2_fpath)
r["rt-avep"], r["rt-accuracy"] = semantic_relation_classification_evaluation(rt_fpath)
r["ae-avep"], r["ae-accuracy"] = semantic_relation_classification_evaluation(ae_fpath)
r["ae2-avep"], r["ae2-accuracy"] = semantic_relation_classification_evaluation(ae2_fpath)
print >> stderr, "hj\trt-avep\trt-accuracy\tae-avep\tae-accuracy\tae2-avep\tae2-accuracy"
print >> stderr, "%(hj).5f\t%(rt-avep).5f\t%(rt-accuracy).5f\t%(ae-avep).5f\t%(ae-accuracy).5f\t%(ae2-avep).5f\t%(ae2-accuracy).5f" % r
def main():
parser = argparse.ArgumentParser(description='Evaluate from test.csv file.')
parser.set_defaults(func=evaluation)
parser.add_argument('test_fpath', help='Path to test.csv file.')
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
| mit |
lmcinnes/umap | doc/bokeh_digits_plot.py | 1 | 1809 | import numpy as np
from sklearn.datasets import load_digits
import pandas as pd
digits = load_digits()
import umap
reducer = umap.UMAP(random_state=42)
embedding = reducer.fit_transform(digits.data)
from io import BytesIO
from PIL import Image
import base64
def embeddable_image(data):
img_data = 255 - 15 * data.astype(np.uint8)
image = Image.fromarray(img_data, mode="L").resize((64, 64), Image.BICUBIC)
buffer = BytesIO()
image.save(buffer, format="png")
for_encoding = buffer.getvalue()
return "data:image/png;base64," + base64.b64encode(for_encoding).decode()
from bokeh.plotting import figure, show, output_file
from bokeh.models import HoverTool, ColumnDataSource, CategoricalColorMapper
from bokeh.palettes import Spectral10
output_file("basic_usage_bokeh_example.html")
digits_df = pd.DataFrame(embedding, columns=("x", "y"))
digits_df["digit"] = [str(x) for x in digits.target]
digits_df["image"] = list(map(embeddable_image, digits.images))
datasource = ColumnDataSource(digits_df)
color_mapping = CategoricalColorMapper(
factors=[str(9 - x) for x in digits.target_names], palette=Spectral10
)
plot_figure = figure(
title="UMAP projection of the Digits dataset",
plot_width=600,
plot_height=600,
tools=("pan, wheel_zoom, reset"),
)
plot_figure.add_tools(
HoverTool(
tooltips="""
<div>
<div>
<img src='@image' style='float: left; margin: 5px 5px 5px 5px'/>
</div>
<div>
<span style='font-size: 16px; color: #224499'>Digit:</span>
<span style='font-size: 18px'>@digit</span>
</div>
</div>
"""
)
)
plot_figure.circle(
"x",
"y",
source=datasource,
color=dict(field="digit", transform=color_mapping),
line_alpha=0.6,
fill_alpha=0.6,
size=4,
)
show(plot_figure)
| bsd-3-clause |
xubenben/data-science-from-scratch | code/working_with_data.py | 61 | 16549 | from __future__ import division
from collections import Counter, defaultdict
from functools import partial
from linear_algebra import shape, get_row, get_column, make_matrix, \
vector_mean, vector_sum, dot, magnitude, vector_subtract, scalar_multiply
from statistics import correlation, standard_deviation, mean
from probability import inverse_normal_cdf
from gradient_descent import maximize_batch
import math, random, csv
import matplotlib.pyplot as plt
import dateutil.parser
def bucketize(point, bucket_size):
"""floor the point to the next lower multiple of bucket_size"""
return bucket_size * math.floor(point / bucket_size)
def make_histogram(points, bucket_size):
"""buckets the points and counts how many in each bucket"""
return Counter(bucketize(point, bucket_size) for point in points)
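# Example (sketch): bucketize floors a value to its bucket edge and
# make_histogram counts the points per bucket.
#   bucketize(103, 10)                   # 100.0
#   make_histogram([8, 23, 27, 41], 10)  # Counter({20.0: 2, 0.0: 1, 40.0: 1})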
def plot_histogram(points, bucket_size, title=""):
histogram = make_histogram(points, bucket_size)
plt.bar(histogram.keys(), histogram.values(), width=bucket_size)
plt.title(title)
plt.show()
def compare_two_distributions():
random.seed(0)
uniform = [random.randrange(-100,101) for _ in range(200)]
normal = [57 * inverse_normal_cdf(random.random())
for _ in range(200)]
plot_histogram(uniform, 10, "Uniform Histogram")
plot_histogram(normal, 10, "Normal Histogram")
def random_normal():
"""returns a random draw from a standard normal distribution"""
return inverse_normal_cdf(random.random())
xs = [random_normal() for _ in range(1000)]
ys1 = [ x + random_normal() / 2 for x in xs]
ys2 = [-x + random_normal() / 2 for x in xs]
def scatter():
plt.scatter(xs, ys1, marker='.', color='black', label='ys1')
plt.scatter(xs, ys2, marker='.', color='gray', label='ys2')
plt.xlabel('xs')
plt.ylabel('ys')
plt.legend(loc=9)
plt.show()
def correlation_matrix(data):
"""returns the num_columns x num_columns matrix whose (i, j)th entry
is the correlation between columns i and j of data"""
_, num_columns = shape(data)
def matrix_entry(i, j):
return correlation(get_column(data, i), get_column(data, j))
return make_matrix(num_columns, num_columns, matrix_entry)
def make_scatterplot_matrix():
# first, generate some random data
num_points = 100
def random_row():
row = [None, None, None, None]
row[0] = random_normal()
row[1] = -5 * row[0] + random_normal()
row[2] = row[0] + row[1] + 5 * random_normal()
row[3] = 6 if row[2] > -2 else 0
return row
random.seed(0)
data = [random_row()
for _ in range(num_points)]
# then plot it
_, num_columns = shape(data)
fig, ax = plt.subplots(num_columns, num_columns)
for i in range(num_columns):
for j in range(num_columns):
# scatter column_j on the x-axis vs column_i on the y-axis
if i != j: ax[i][j].scatter(get_column(data, j), get_column(data, i))
# unless i == j, in which case show the series name
else: ax[i][j].annotate("series " + str(i), (0.5, 0.5),
xycoords='axes fraction',
ha="center", va="center")
# then hide axis labels except left and bottom charts
if i < num_columns - 1: ax[i][j].xaxis.set_visible(False)
if j > 0: ax[i][j].yaxis.set_visible(False)
# fix the bottom right and top left axis labels, which are wrong because
# their charts only have text in them
ax[-1][-1].set_xlim(ax[0][-1].get_xlim())
ax[0][0].set_ylim(ax[0][1].get_ylim())
plt.show()
def parse_row(input_row, parsers):
"""given a list of parsers (some of which may be None)
apply the appropriate one to each element of the input_row"""
return [parser(value) if parser is not None else value
for value, parser in zip(input_row, parsers)]
def parse_rows_with(reader, parsers):
"""wrap a reader to apply the parsers to each of its rows"""
for row in reader:
yield parse_row(row, parsers)
def try_or_none(f):
"""wraps f to return None if f raises an exception
assumes f takes only one input"""
def f_or_none(x):
try: return f(x)
except: return None
return f_or_none
def parse_row(input_row, parsers):
return [try_or_none(parser)(value) if parser is not None else value
for value, parser in zip(input_row, parsers)]
def try_parse_field(field_name, value, parser_dict):
"""try to parse value using the appropriate function from parser_dict"""
parser = parser_dict.get(field_name) # None if no such entry
if parser is not None:
return try_or_none(parser)(value)
else:
return value
def parse_dict(input_dict, parser_dict):
return { field_name : try_parse_field(field_name, value, parser_dict)
for field_name, value in input_dict.iteritems() }
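# Example (sketch): parse_row applies one parser per column and leaves None
# where parsing fails, so bad rows are easy to spot afterwards.
#   parse_row(["6/20/2014", "AAPL", "90.91"],
#             [dateutil.parser.parse, None, float])
#   # -> [datetime.datetime(2014, 6, 20, 0, 0), 'AAPL', 90.91]
#   parse_row(["6/20/2014", "AAPL", "n/a"],
#             [dateutil.parser.parse, None, float])
#   # -> [datetime.datetime(2014, 6, 20, 0, 0), 'AAPL', None]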
#
#
# MANIPULATING DATA
#
#
def picker(field_name):
"""returns a function that picks a field out of a dict"""
return lambda row: row[field_name]
def pluck(field_name, rows):
"""turn a list of dicts into the list of field_name values"""
return map(picker(field_name), rows)
def group_by(grouper, rows, value_transform=None):
# key is output of grouper, value is list of rows
grouped = defaultdict(list)
for row in rows:
grouped[grouper(row)].append(row)
if value_transform is None:
return grouped
else:
return { key : value_transform(rows)
for key, rows in grouped.iteritems() }
def percent_price_change(yesterday, today):
return today["closing_price"] / yesterday["closing_price"] - 1
def day_over_day_changes(grouped_rows):
# sort the rows by date
ordered = sorted(grouped_rows, key=picker("date"))
# zip with an offset to get pairs of consecutive days
return [{ "symbol" : today["symbol"],
"date" : today["date"],
"change" : percent_price_change(yesterday, today) }
for yesterday, today in zip(ordered, ordered[1:])]
#
#
# RESCALING DATA
#
#
def scale(data_matrix):
num_rows, num_cols = shape(data_matrix)
means = [mean(get_column(data_matrix,j))
for j in range(num_cols)]
stdevs = [standard_deviation(get_column(data_matrix,j))
for j in range(num_cols)]
return means, stdevs
def rescale(data_matrix):
"""rescales the input data so that each column
has mean 0 and standard deviation 1
ignores columns with no deviation"""
means, stdevs = scale(data_matrix)
def rescaled(i, j):
if stdevs[j] > 0:
return (data_matrix[i][j] - means[j]) / stdevs[j]
else:
return data_matrix[i][j]
num_rows, num_cols = shape(data_matrix)
return make_matrix(num_rows, num_cols, rescaled)
#
# DIMENSIONALITY REDUCTION
#
X = [
[20.9666776351559,-13.1138080189357],
[22.7719907680008,-19.8890894944696],
[25.6687103160153,-11.9956004517219],
[18.0019794950564,-18.1989191165133],
[21.3967402102156,-10.8893126308196],
[0.443696899177716,-19.7221132386308],
[29.9198322142127,-14.0958668502427],
[19.0805843080126,-13.7888747608312],
[16.4685063521314,-11.2612927034291],
[21.4597664701884,-12.4740034586705],
[3.87655283720532,-17.575162461771],
[34.5713920556787,-10.705185165378],
[13.3732115747722,-16.7270274494424],
[20.7281704141919,-8.81165591556553],
[24.839851437942,-12.1240962157419],
[20.3019544741252,-12.8725060780898],
[21.9021426929599,-17.3225432396452],
[23.2285885715486,-12.2676568419045],
[28.5749111681851,-13.2616470619453],
[29.2957424128701,-14.6299928678996],
[15.2495527798625,-18.4649714274207],
[26.5567257400476,-9.19794350561966],
[30.1934232346361,-12.6272709845971],
[36.8267446011057,-7.25409849336718],
[32.157416823084,-10.4729534347553],
[5.85964365291694,-22.6573731626132],
[25.7426190674693,-14.8055803854566],
[16.237602636139,-16.5920595763719],
[14.7408608850568,-20.0537715298403],
[6.85907008242544,-18.3965586884781],
[26.5918329233128,-8.92664811750842],
[-11.2216019958228,-27.0519081982856],
[8.93593745011035,-20.8261235122575],
[24.4481258671796,-18.0324012215159],
[2.82048515404903,-22.4208457598703],
[30.8803004755948,-11.455358009593],
[15.4586738236098,-11.1242825084309],
[28.5332537090494,-14.7898744423126],
[40.4830293441052,-2.41946428697183],
[15.7563759125684,-13.5771266003795],
[19.3635588851727,-20.6224770470434],
[13.4212840786467,-19.0238227375766],
[7.77570680426702,-16.6385739839089],
[21.4865983854408,-15.290799330002],
[12.6392705930724,-23.6433305964301],
[12.4746151388128,-17.9720169566614],
[23.4572410437998,-14.602080545086],
[13.6878189833565,-18.9687408182414],
[15.4077465943441,-14.5352487124086],
[20.3356581548895,-10.0883159703702],
[20.7093833689359,-12.6939091236766],
[11.1032293684441,-14.1383848928755],
[17.5048321498308,-9.2338593361801],
[16.3303688220188,-15.1054735529158],
[26.6929062710726,-13.306030567991],
[34.4985678099711,-9.86199941278607],
[39.1374291499406,-10.5621430853401],
[21.9088956482146,-9.95198845621849],
[22.2367457578087,-17.2200123442707],
[10.0032784145577,-19.3557700653426],
[14.045833906665,-15.871937521131],
[15.5640911917607,-18.3396956121887],
[24.4771926581586,-14.8715313479137],
[26.533415556629,-14.693883922494],
[12.8722580202544,-21.2750596021509],
[24.4768291376862,-15.9592080959207],
[18.2230748567433,-14.6541444069985],
[4.1902148367447,-20.6144032528762],
[12.4332594022086,-16.6079789231489],
[20.5483758651873,-18.8512560786321],
[17.8180560451358,-12.5451990696752],
[11.0071081078049,-20.3938092335862],
[8.30560561422449,-22.9503944138682],
[33.9857852657284,-4.8371294974382],
[17.4376502239652,-14.5095976075022],
[29.0379635148943,-14.8461553663227],
[29.1344666599319,-7.70862921632672],
[32.9730697624544,-15.5839178785654],
[13.4211493998212,-20.150199857584],
[11.380538260355,-12.8619410359766],
[28.672631499186,-8.51866271785711],
[16.4296061111902,-23.3326051279759],
[25.7168371582585,-13.8899296143829],
[13.3185154732595,-17.8959160024249],
[3.60832478605376,-25.4023343597712],
[39.5445949652652,-11.466377647931],
[25.1693484426101,-12.2752652925707],
[25.2884257196471,-7.06710309184533],
[6.77665715793125,-22.3947299635571],
[20.1844223778907,-16.0427471125407],
[25.5506805272535,-9.33856532270204],
[25.1495682602477,-7.17350567090738],
[15.6978431006492,-17.5979197162642],
[37.42780451491,-10.843637288504],
[22.974620174842,-10.6171162611686],
[34.6327117468934,-9.26182440487384],
[34.7042513789061,-6.9630753351114],
[15.6563953929008,-17.2196961218915],
[25.2049825789225,-14.1592086208169]
]
def de_mean_matrix(A):
"""returns the result of subtracting from every value in A the mean
value of its column. the resulting matrix has mean 0 in every column"""
nr, nc = shape(A)
column_means, _ = scale(A)
return make_matrix(nr, nc, lambda i, j: A[i][j] - column_means[j])
def direction(w):
mag = magnitude(w)
return [w_i / mag for w_i in w]
def directional_variance_i(x_i, w):
"""the variance of the row x_i in the direction w"""
return dot(x_i, direction(w)) ** 2
def directional_variance(X, w):
"""the variance of the data in the direction w"""
return sum(directional_variance_i(x_i, w) for x_i in X)
def directional_variance_gradient_i(x_i, w):
"""the contribution of row x_i to the gradient of
the direction-w variance"""
projection_length = dot(x_i, direction(w))
return [2 * projection_length * x_ij for x_ij in x_i]
def directional_variance_gradient(X, w):
return vector_sum(directional_variance_gradient_i(x_i,w) for x_i in X)
def first_principal_component(X):
guess = [1 for _ in X[0]]
unscaled_maximizer = maximize_batch(
partial(directional_variance, X), # is now a function of w
partial(directional_variance_gradient, X), # is now a function of w
guess)
return direction(unscaled_maximizer)
def first_principal_component_sgd(X):
guess = [1 for _ in X[0]]
unscaled_maximizer = maximize_stochastic(
lambda x, _, w: directional_variance_i(x, w),
lambda x, _, w: directional_variance_gradient_i(x, w),
X, [None for _ in X], guess)
return direction(unscaled_maximizer)
def project(v, w):
"""return the projection of v onto w"""
coefficient = dot(v, w)
return scalar_multiply(coefficient, w)
def remove_projection_from_vector(v, w):
"""projects v onto w and subtracts the result from v"""
return vector_subtract(v, project(v, w))
def remove_projection(X, w):
"""for each row of X
projects the row onto w, and subtracts the result from the row"""
return [remove_projection_from_vector(x_i, w) for x_i in X]
def principal_component_analysis(X, num_components):
components = []
for _ in range(num_components):
component = first_principal_component(X)
components.append(component)
X = remove_projection(X, component)
return components
def transform_vector(v, components):
return [dot(v, w) for w in components]
def transform(X, components):
return [transform_vector(x_i, components) for x_i in X]
if __name__ == "__main__":
print "correlation(xs, ys1)", correlation(xs, ys1)
print "correlation(xs, ys2)", correlation(xs, ys2)
# safe parsing
data = []
with open("comma_delimited_stock_prices.csv", "rb") as f:
reader = csv.reader(f)
for line in parse_rows_with(reader, [dateutil.parser.parse, None, float]):
data.append(line)
for row in data:
if any(x is None for x in row):
print row
print "stocks"
with open("stocks.txt", "rb") as f:
reader = csv.DictReader(f, delimiter="\t")
data = [parse_dict(row, { 'date' : dateutil.parser.parse,
'closing_price' : float })
for row in reader]
max_aapl_price = max(row["closing_price"]
for row in data
if row["symbol"] == "AAPL")
print "max aapl price", max_aapl_price
# group rows by symbol
by_symbol = defaultdict(list)
for row in data:
by_symbol[row["symbol"]].append(row)
# use a dict comprehension to find the max for each symbol
max_price_by_symbol = { symbol : max(row["closing_price"]
for row in grouped_rows)
for symbol, grouped_rows in by_symbol.iteritems() }
print "max price by symbol"
print max_price_by_symbol
# key is symbol, value is list of "change" dicts
changes_by_symbol = group_by(picker("symbol"), data, day_over_day_changes)
# collect all "change" dicts into one big list
all_changes = [change
for changes in changes_by_symbol.values()
for change in changes]
print "max change", max(all_changes, key=picker("change"))
print "min change", min(all_changes, key=picker("change"))
# to combine percent changes, we add 1 to each, multiply them, and subtract 1
# for instance, if we combine +10% and -20%, the overall change is
# (1 + 10%) * (1 - 20%) - 1 = 1.1 * .8 - 1 = -12%
def combine_pct_changes(pct_change1, pct_change2):
return (1 + pct_change1) * (1 + pct_change2) - 1
def overall_change(changes):
return reduce(combine_pct_changes, pluck("change", changes))
overall_change_by_month = group_by(lambda row: row['date'].month,
all_changes,
overall_change)
print "overall change by month"
print overall_change_by_month
print "rescaling"
data = [[1, 20, 2],
[1, 30, 3],
[1, 40, 4]]
print "original: ", data
print "scale: ", scale(data)
print "rescaled: ", rescale(data)
print
print "PCA"
Y = de_mean_matrix(X)
components = principal_component_analysis(Y, 2)
print "principal components", components
print "first point", Y[0]
print "first point transformed", transform_vector(Y[0], components)
| unlicense |
eroicaleo/LearningPython | HandsOnML/ch09/house_gd_optimizer.py | 1 | 1309 | #!/usr/bin/env python
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
learning_rate = 0.01
n_epochs = 10000
def scaler_norm(a):
return StandardScaler().fit(a).transform(a)
housing = fetch_california_housing()
m, n = housing.data.shape
housing_data_norm = scaler_norm(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing_data_norm]
y_norm = scaler_norm(housing.target.reshape(-1, 1))
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(y_norm, dtype=tf.float32, name='y')
XT = tf.transpose(X)
theta = tf.Variable(tf.random_uniform([n+1, 1], -1.0, 1.0), dtype=tf.float32, name='theta')
y_pred = tf.matmul(X, theta)
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
print('#'*80)
print('## Autodiff Gradient descent')
print('#'*80)
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
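# Sanity check (sketch): for this quadratic cost the closed-form normal
# equation gives the parameters gradient descent should converge towards.
#   theta_exact = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
#   with tf.Session() as sess:
#       print(theta_exact.eval())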
| mit |
tayebzaidi/HonorsThesisTZ | ThesisCode/classification/plot_tsne.py | 2 | 4642 | #!/usr/bin/env python
import sys
import os
import json
import numpy as np
from sklearn import preprocessing
from sklearn.manifold import TSNE
# sys.path.insert(1,'~/Multicore-TSNE')
# from MulticoreTSNE import MulticoreTSNE as TSNE
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
def main():
metric = "correlation" #"euclidean", "mahalanobis"
# the best perplexity seems to be ~300 - it's essentially the number of neighbors you think share a label
best_learning_rate = 100
maxiter = 2000
coeffs = []
labels = []
keys = []
# supply filename as argument 1
fn = sys.argv[1]
# there's just too little data in this dataset to warrant all those groups
#groups = { 0:(u'II', u'II P', u'IIb', u'IIn'),
# 1:(u'Ia',),
# 2:( u'Ia Pec', u'Ia-02cx'),
# 3:(u'Ib', u'Ib Pec', u'Ib-n/IIb-n', u'Ib/c',\
# u'Ibn', u'Ic', u'Ic BL', u'Ic Pec', u'Ic/Ic-bl'),
# 4:(u'SLSN', u'SLSN-I', u'SLSN-II')}
groups = { 0:(u'II', u'II P', u'IIb', u'IIn',\
u'Ib', u'Ib Pec', u'Ib-n/IIb-n', u'Ib/c',\
u'Ibn', u'Ic', u'Ic BL', u'Ic Pec', u'Ic/Ic-bl',\
u'SLSN', u'SLSN-I', u'SLSN-II'),
1:(u'Ia',)}
#2:( u'Ia Pec', u'Ia-02cx'),
#3:(u'Ib', u'Ib Pec', u'Ib-n/IIb-n', u'Ib/c',\
# u'Ibn', u'Ic', u'Ic BL', u'Ic Pec', u'Ic/Ic-bl'),
#4:(u'SLSN', u'SLSN-I', u'SLSN-II')}
# parse the data
with open(fn, 'r') as f:
d = json.load(f)
for key in d.keys():
keys.append(key)
coeffs.append(d[key]['coeffs'])
type = d[key]['type']
for group, types in groups.items():
if type in types:
labels.append(group)
continue
coeffs = np.array(coeffs)
labels = np.array(labels)
keys = np.array(keys)
colors = ['C{:n}'.format(x) for x in labels]
ucolors = ['C{:n}'.format(x) for x in sorted(np.unique(labels))]
#stringlabels = ['II', 'Ia', 'Iax', 'Ib/c', 'SLSN']
stringlabels = ['Ia', 'Non-Ia']
# make the TSNE
X_scaled = preprocessing.scale(coeffs)
model = TSNE(n_components=2, random_state=0, perplexity=float(sys.argv[2]),\
n_iter=maxiter, verbose=2, learning_rate=100, init="pca", metric=metric)
# there's an alternate package from github, but doesn't matter for this dataset since it is small
#model = TSNE(n_jobs=8, n_iter=max_iter, method='exact', perplexity=float(sys.argv[2]))
# find the transformation to a 2D space
X_scaled = preprocessing.scale(coeffs)
out = model.fit_transform(X_scaled)
###Print outliers and check manually
#x first
print(out.shape)
bad_idxs_x = mad_based_outlier(out[:,0])
bad_idxs_y = mad_based_outlier(out[:,1])
print(keys[bad_idxs_x])
print(keys[bad_idxs_y])
bad_idxs = np.bitwise_or(bad_idxs_x, bad_idxs_y)
out = np.delete(out, np.where(bad_idxs==True), 0)
labels = elim_idxs(labels, bad_idxs_x, bad_idxs_y)
keys = elim_idxs(keys, bad_idxs_x, bad_idxs_y)
colors = elim_idxs(colors, bad_idxs_x, bad_idxs_y)
ucolors = elim_idxs(ucolors, bad_idxs_x, bad_idxs_y)
# plot the results
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
ax.scatter(out[:,0], out[:,1], c=colors, alpha=0.7)
# add a legend
lines = []
for col, type in zip(ucolors, stringlabels):
lines.append(mlines.Line2D([], [], color=col, marker='o', ls='None', ms=10, label=type))
ax.legend(handles=lines, frameon=False, fontsize='large')
# label axes
ax.set_xlabel('TSNE u', fontsize='x-large')
ax.set_ylabel('TSNE v', fontsize='x-large')
ax.set_title(sys.argv[3], fontsize='xx-large')
#ax.set_ylim(-5,5)
#ax.set_xlim(-5,5)
plt.tight_layout()
# save plot
outf = os.path.basename(fn)
fig.savefig('tsne_{}.pdf'.format(outf.replace('.json','')))
plt.ion()
plt.show(fig)
plt.close(fig)
def elim_idxs(obj, idxs_x, idxs_y):
return np.delete(np.delete(obj, idxs_x), idxs_y)
###Code copied from stack overflow 22354094 for MAD outlier detection
def mad_based_outlier(points, thresh=3.5):
if len(points.shape) == 1:
points = points[:,None]
median = np.median(points, axis=0)
diff = np.sum((points - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
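# Example (sketch): only the value far from the median exceeds the modified
# z-score threshold of 3.5.
#   pts = np.array([1.0, 1.1, 0.9, 1.2, 25.0])
#   mad_based_outlier(pts)   # array([False, False, False, False,  True])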
if __name__=='__main__':
sys.exit(main())
| gpl-3.0 |
sem-geologist/hyperspy | hyperspy/learn/svd_pca.py | 7 | 3075 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANdata WARRANTdata; without even the implied warranty of
# MERCHANTABILITdata or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# dataou should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import numpy as np
import scipy.linalg
from hyperspy.misc.machine_learning.import_sklearn import (
fast_svd, sklearn_installed)
_logger = logging.getLogger(__name__)
def svd_pca(data, fast=False, output_dimension=None, centre=None,
auto_transpose=True):
"""Perform PCA using SVD.
Parameters
----------
data : numpy array
MxN array of input data (M variables, N trials)
fast : bool
        Whether to use randomized SVD to estimate a limited number of
        components given by output_dimension
output_dimension : int
Number of components to estimate when fast is True
centre : None | 'variables' | 'trials'
        If None, no centring is applied. If 'variables', the centring will be
        performed on the variable axis. If 'trials', the centring will be
        performed on the trials axis.
auto_transpose : bool
If True, automatically transposes the data to boost performance
Returns
-------
factors : numpy array
loadings : numpy array
explained_variance : numpy array
mean : numpy array or None (if center is None)
"""
N, M = data.shape
if centre is not None:
if centre == 'variables':
mean = data.mean(1)[:, np.newaxis]
elif centre == 'trials':
mean = data.mean(0)[np.newaxis, :]
else:
raise AttributeError(
'centre must be one of: None, variables, trials')
data -= mean
else:
mean = None
if auto_transpose is True:
if N < M:
_logger.info("Auto transposing the data")
data = data.T
else:
auto_transpose = False
if fast is True and sklearn_installed is True:
if output_dimension is None:
raise ValueError('When using fast_svd it is necessary to '
'define the output_dimension')
U, S, V = fast_svd(data, output_dimension)
else:
U, S, V = scipy.linalg.svd(data, full_matrices=False)
if auto_transpose is False:
factors = V.T
explained_variance = S ** 2 / N
loadings = U * S
else:
loadings = V.T
explained_variance = S ** 2 / N
factors = U * S
return factors, loadings, explained_variance, mean
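# Illustrative usage (not part of the original HyperSpy source): a minimal
# sketch of calling svd_pca on synthetic data. The helper name and the
# synthetic matrix shape below are assumptions chosen only for demonstration.
def _svd_pca_example():
    rng = np.random.RandomState(0)
    data = rng.randn(20, 50)  # small random matrix for illustration
    # centre on the 'trials' axis; a copy is passed because svd_pca subtracts
    # the mean in place when centring is requested
    factors, loadings, explained_variance, mean = svd_pca(
        data.copy(), centre='trials')
    return factors, loadings, explained_variance, mean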
| gpl-3.0 |
ramidas/ChIA-PET_sigvis | drawSignal.py | 1 | 2104 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import sys
from matplotlib import pyplot as plt
import numpy as np
import argparse
def getKaryotype(fname):
"""returns dictionary e.g.: {'chr13': 115169878, ... } """
data = [i.strip().split() for i in open(fname) if i[:3] == 'chr']
hs = {}
for i in data:
hs[i[6]] = int(i[5])
return hs
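# Note (assumption, for illustration): a Circos karyotype line typically looks
# like "chr - hs13 13 0 115169878 chr13", so fields[5] above is the chromosome
# length and fields[6] is the 'chrN' label used as the dictionary key.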
def drawSignals(karyotype, chromosome, signalFiles, segments):
for f in signalFiles:
y = np.fromfile(f, dtype=np.uint16)
if y.size > 1e6:
every = int(y.size//1e6)
print "[ \033[1;33mWARN\033[1;m ] Your data from {} are very big so they were probed every {:d} point so they could fit in circa 1 000 000 points. Sorry :(".format(f.name, every)
y = y[0::every]
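        # the signal samples are assumed to be evenly spaced along the chromosome,
        # so x simply spans [0, chromosome length] with one coordinate per value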
x = np.linspace(0,karyotype[chromosome],len(y))
print "Plotting {}".format(f.name)
bla = plt.plot(x,y, '-', label=f.name)
color = bla[-1].get_color()
plt.fill_between(x,y, color = color, alpha=0.1)
if segments:
tmp = [i.strip().split() for i in segments ]
segByPrzemek = [int(i[1]) for i in tmp if i[0] == chromosome]
plt.plot(segByPrzemek,[0 for i in xrange(len(segByPrzemek))],'d',color='yellow',linestyle="none" )
plt.legend()
plt.grid()
plt.title("ChIA-PET signal for {}".format(chromosome))
plt.show()
def main():
parser = argparse.ArgumentParser(description='Script for plotting ChIA-PET signal')
parser.add_argument('karyotype', help='karyotype in Circos format')
    parser.add_argument('chromosome', help='chromosome, e.g. chr22')
parser.add_argument('signalFiles', type=argparse.FileType('r'), nargs='+', help="filenames with signal saved in numpy np.uint16 binary format")
parser.add_argument('-s', '--segments', type=argparse.FileType('r'), help="name of file with segments")
args = parser.parse_args()
karyotype = getKaryotype(args.karyotype)
drawSignals(karyotype, args.chromosome, args.signalFiles, args.segments)
if __name__ == '__main__':
main()
| mit |
smartscheduling/scikit-learn-categorical-tree | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
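            # Supplying the precomputed Gram matrix (X^T X) and X^T y lets
            # lars_path skip recomputing these products internally; that is the
            # difference being benchmarked against the "without Gram" variant.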
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/frame/methods/test_duplicated.py | 1 | 3190 | import re
import numpy as np
import pytest
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]])
def test_duplicated_with_misspelled_column_name(subset):
# GH 19730
df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
msg = re.escape("Index(['a'], dtype='object')")
with pytest.raises(KeyError, match=msg):
df.duplicated(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes():
# gh-21524
# Given the wide dataframe with a lot of columns
# with different (important!) values
data = {f"col_{i:02d}": np.random.randint(0, 1000, 30000) for i in range(100)}
df = DataFrame(data).T
result = df.duplicated()
# Then duplicates produce the bool Series as a result and don't fail during
# calculation. Actual values doesn't matter here, though usually it's all
# False in this case
assert isinstance(result, Series)
assert result.dtype == np.bool
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, False, True])),
("last", Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True])),
],
)
def test_duplicated_keep(keep, expected):
df = DataFrame({"A": [0, 1, 1, 2, 0], "B": ["a", "b", "b", "c", "a"]})
result = df.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal")
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, False, True])),
("last", Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True])),
],
)
def test_duplicated_nan_none(keep, expected):
df = DataFrame({"C": [np.nan, 3, 3, None, np.nan]}, dtype=object)
result = df.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("subset", [None, ["A", "B"], "A"])
def test_duplicated_subset(subset, keep):
df = DataFrame(
{
"A": [0, 1, 1, 2, 0],
"B": ["a", "b", "b", "c", "a"],
"C": [np.nan, 3, 3, None, np.nan],
}
)
if subset is None:
subset = list(df.columns)
elif isinstance(subset, str):
# need to have a DataFrame, not a Series
# -> select columns with singleton list, not string
subset = [subset]
expected = df[subset].duplicated(keep=keep)
result = df.duplicated(keep=keep, subset=subset)
tm.assert_series_equal(result, expected)
def test_duplicated_on_empty_frame():
# GH 25184
df = DataFrame(columns=["a", "b"])
dupes = df.duplicated("a")
result = df[dupes]
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_frame_datetime64_duplicated():
dates = date_range("2010-07-01", end="2010-08-05")
tst = DataFrame({"symbol": "AAA", "date": dates})
result = tst.duplicated(["date", "symbol"])
assert (-result).all()
tst = DataFrame({"date": dates})
result = tst.duplicated()
assert (-result).all()
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Check non-regression of a bug when a connectivity matrix that does not
    # support item assignment is provided with more than one connected component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
allthroughthenight/aces | python/drivers/linear_wave_theory.py | 1 | 12795 | import sys
import math
sys.path.append('../functions')
from base_driver import BaseDriver
from helper_objects import BaseField
import USER_INPUT
from ERRWAVBRK1 import ERRWAVBRK1
from WAVELEN import WAVELEN
import numpy as np
import matplotlib.pyplot as plt
from EXPORTER import EXPORTER
## ACES Update to MATLAB
#-------------------------------------------------------------
# Driver for Linear Wave Theory (page 2-1 in ACES User's Guide)
# Yields first-order approximations for various wave parameters of wave
# motion as predicted by linear wave theory
# Transferred by: Mary Anderson, USACE-CHL-Coastal Processes Branch
# Date Created: March 17, 2011
# Date Modified: June 26th, 2016 by yaprak
# Requires the following functions:
# ERRWAVBRK1
# WAVELEN
# MAIN VARIABLE LIST:
# INPUT
# H: wave height (m or ft)
# T: wave period (sec)
# d: water depth (m or ft)
# z: vertical coordinate (m or ft)
# xL: horizontal coordinate as fraction of wavelength (x/L)
# OUTPUT
# L: wavelength (m or ft)
# C: wave celerity (m/sec or ft/sec)
# Cg: group celerity (m/sec or ft/sec)
# E: energy density (N-m/m^2 or ft-lb/ft^2)
# Ef: energy flux (N-m/sec-m or ft-lb/sec-ft)
# Ur: Ursell number
# eta: surface elevation (m or ft)
# px: horizontal particle displacement (m or ft)
# LOOK AT PZ AND PY
# pz: vertical particle displacement (m or ft)
# u: horizontal particle velocity (m/sec or ft/sec)
# w: vertical particle velocity (m/sec or ft/sec)
# dudt: horizontal particle acceleration (m/sec^2 or ft/sec^2)
# dwdt: vertical particle acceleration (m/sec^2 or ft/sec^2)
# pres: pressure (N/m^2 or lb/ft^2)
## -------------------------------------------------------------
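# Illustrative note (not part of the original ACES driver): passing explicit
# values to the constructor, e.g.
#   LinearWaveTheory(H=6.3, T=8.0, d=20.0, z=-12.0, xL=0.75)
# switches the driver into single-case mode; the numbers shown are assumptions
# chosen only to show the calling convention, not validated test values.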
class LinearWaveTheory(BaseDriver):
def __init__(self, H = None, T = None, d = None,\
z = None, xL = None):
self.exporter = EXPORTER("output/exportLinearWaveTheory")
if H != None:
self.isSingleCase = True
self.defaultValueH = H
if T != None:
self.isSingleCase = True
self.defaultValueT = T
if d != None:
self.isSingleCase = True
self.defaultValue_d = d
if z != None:
self.isSingleCase = True
self.defaultValue_z = z
if xL != None:
self.isSingleCase = True
self.defaultValue_xL = xL
super(LinearWaveTheory, self).__init__()
self.exporter.close()
# end __init__
def userInput(self):
super(LinearWaveTheory, self).userInput()
self.waterType, self.rho =\
USER_INPUT.SALT_FRESH_WATER(self.isMetric)
# end userInput
def defineInputDataList(self):
self.inputList = []
if not hasattr(self, "defaultValueH"):
self.inputList.append(BaseField(\
"H: wave height (%s)" % self.labelUnitDist, 0.1, 200.0))
if not hasattr(self, "defaultValueT"):
self.inputList.append(BaseField(\
"T: wave period (sec)", 1.0, 1000.0))
if not hasattr(self, "defaultValue_d"):
self.inputList.append(BaseField(\
"d: water depth (%s)" % self.labelUnitDist, 0.1, 5000.0))
if not hasattr(self, "defaultValue_z"):
self.inputList.append(BaseField(\
"z: vertical coordinate (%s)" % self.labelUnitDist,\
-5100.0, 100.0))
if not hasattr(self, "defaultValue_xL"):
self.inputList.append(BaseField(\
"xL: horizontal coordinate as fraction of wavelength (x/L)",\
0.0, 1.0))
# end defineInputDataList
def fileOutputRequestInit(self):
self.fileOutputRequestMain(defaultFilename = "linear_wave_theory")
def getCalcValues(self, caseInputList):
currIndex = 0
if hasattr(self, "defaultValueH"):
H = self.defaultValueH
else:
H = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValueT"):
T = self.defaultValueT
else:
T = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValue_d"):
d = self.defaultValue_d
else:
d = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValue_z"):
z = self.defaultValue_z
else:
z = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValue_xL"):
xL = self.defaultValue_xL
else:
xL = caseInputList[currIndex]
currIndex = currIndex + 1
return H, T, d, z, xL
# end getCalcValues
def performCalculations(self, caseInputList, caseIndex = 0):
H, T, d, z, xL = self.getCalcValues(caseInputList)
dataDict = {"H": H, "T": T, "d": d, "z": z, "xL": xL}
twopi = 2*math.pi
nIteration = 50
L, k = WAVELEN(d, T, nIteration, self.g)
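        # WAVELEN iteratively solves the standard linear dispersion relation,
        # L = (g*T**2 / (2*pi)) * tanh(2*pi*d / L), returning the wavelength L
        # and the wavenumber k = 2*pi/L.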
theta = xL*twopi #theta=(kx-wt) where arbitrarily t=0 and k=2*pi/L
# Check for monochromatic wave breaking (depth limited - no slope)
Hb = ERRWAVBRK1(d, 0.78)
if not (H < Hb):
self.errorMsg = "Error: Input wave broken (Hb = %6.2f %s)" %\
(Hb, self.labelUnitDist)
print(self.errorMsg)
self.fileOutputWriteMain(dataDict, caseIndex)
return
# Check to make sure vertical coordinate is within waveform
eta = (H/2)*math.cos(theta)
if not (z < eta and (z + d) > 0):
self.errorMsg = "Error: Point outside waveform."
print(self.errorMsg)
self.fileOutputWriteMain(dataDict, caseIndex)
return
# Main Computations
arg = (2*k*d/(math.sinh(2*k*d)))
tot = d + z
C = L/T
Cg = 0.5*(1 + arg)*C
E = (1.0/8.0)*self.rho*self.g*(H**2)
Ef = E*Cg
Ur = L**2*H/(d**3)
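        # Ur = H*L**2/d**3 is the Ursell number; small values indicate that
        # linear (Airy) theory is a reasonable approximation for this wave.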
px = (-H/2)*(math.cosh(k*tot)/math.sinh(k*d))*math.sin(theta)
py = (H/2)*(math.sinh(k*tot)/math.sinh(k*d))*math.cos(theta)
u = (H*math.pi/T)*(math.cosh(k*tot)/math.sinh(k*d))*math.cos(theta)
w = (H*math.pi/T)*(math.sinh(k*tot)/math.sinh(k*d))*math.sin(theta)
dudt = (H*2*math.pi**2/(T**2))*(math.cosh(k*tot)/math.sinh(k*d))*math.sin(theta)
dwdt = (-H*2*math.pi**2/(T**2))*(math.sinh(k*tot)/math.sinh(k*d))*math.cos(theta)
pres = -self.rho*self.g*z + self.rho*self.g*(H/2)*(math.cosh(k*tot)/math.cosh(k*d))*math.cos(theta)
# plotting waveform
if self.isSingleCase:
plotxL = np.arange(-1, 1, 0.001)
plottheta = plotxL * np.pi * 2
ploteta = (H / 2) * np.cos(plottheta)
plotu = (H * np.pi / T) * (np.cosh(k * tot) / np.sinh(k * d)) * np.cos(plottheta)
plotw = (H * np.pi / T) * (np.sinh(k * tot) / np.sinh(k * d)) * np.sin(plottheta)
plt.subplot(3, 1, 1)
plt.plot(plotxL, ploteta, lw=2)
plt.ylabel('Elevation [%s]' % self.labelUnitDist)
plt.ylim(min(ploteta) - 1, max(ploteta) + 1)
plt.axhline(color = 'r', linestyle = '--')
# subplot
plt.subplot(3, 1, 2)
plt.plot(plotxL, plotu, lw=2)
plt.axhline(color = 'r', linestyle = '--')
plt.ylabel('Velocity, u [%s/s]' % self.labelUnitDist)
plt.ylim(min(plotu) - 1, max(plotu) + 1)
# subplot
plt.subplot(3, 1, 3)
plt.plot(plotxL, plotw, lw=2)
plt.axhline(color = 'r', linestyle = '--')
plt.ylabel('Velocity, w [%s/s]' % self.labelUnitDist)
plt.ylim(min(plotw) - 1, max(plotw) + 1)
plt.tight_layout(pad=0.4)
plt.show()
print("\t\t\t\t\tUnits")
print("Wavelength\t\t\t%-6.2f\t%s" % (L, self.labelUnitDist))
print("Celerity\t\t\t%-6.2f\t%s/s" % (C, self.labelUnitDist))
print("Group speed\t\t\t%-6.2f\t%s/s" % (Cg, self.labelUnitDist))
print("Energy density\t\t\t%-8.2f%s-%s/%s^2" %\
(E, self.labelUnitWt, self.labelUnitDist, self.labelUnitDist))
print("Energy flux\t\t\t%-8.2f%s-%s/%s-s" %\
(Ef, self.labelUnitWt, self.labelUnitDist, self.labelUnitDist))
print("Ursell number\t\t\t%-6.2f" % Ur)
print("Water Surface Elevation\t\t%-6.2f\t%s" %\
(eta, self.labelUnitDist))
print("Horz. displacement\t\t%-6.2f\t%s" % (px, self.labelUnitDist))
print("Vert. displacement\t\t%-6.2f\t%s" % (py, self.labelUnitDist))
print("Horz. velocity\t\t\t%-6.2f\t%s/s" % (u, self.labelUnitDist))
print("Vert. velocity\t\t\t%-6.2f\t%s/s" % (w, self.labelUnitDist))
print("Horz. acceleration\t\t%-6.2f\t%s/s^2" %\
(dudt, self.labelUnitDist))
print("Vert. acceleration\t\t%-6.2f\t%s/s^2" %\
(dwdt, self.labelUnitDist))
print("Pressure\t\t\t%-8.2f%s/%s^2" %\
(pres, self.labelUnitWt, self.labelUnitDist))
dataDict["L"] = L
dataDict["C"] = C
dataDict["Cg"] = Cg
dataDict["E"] = E
dataDict["Ef"] = Ef
dataDict["Ur"] = Ur
dataDict["eta"] = eta
dataDict["px"] = px
dataDict["py"] = py
dataDict["u"] = u
dataDict["w"] = w
dataDict["dudt"] = dudt
dataDict["dwdt"] = dwdt
dataDict["pres"] = pres
self.fileOutputWriteMain(dataDict, caseIndex)
# end performCalculations
def fileOutputWriteData(self, dataDict):
self.fileRef.write("Linear Wave Theory Summary\n\n");
self.fileRef.write("Input\n")
self.fileRef.write("Wave heights\t\t\t%8.2f %s\n" %\
(dataDict["H"], self.labelUnitDist))
self.fileRef.write("Wave period\t\t\t%8.2f s\n" % dataDict["T"])
self.fileRef.write("Water depth\t\t\t%8.2f %s\n" %\
(dataDict["d"], self.labelUnitDist))
self.fileRef.write("Vertical coordinate\t\t%8.2f %s\n" %\
(dataDict["z"], self.labelUnitDist))
self.fileRef.write("Horizontal coordinate as\t%8.2f (x/L)\n" %\
dataDict["xL"])
self.fileRef.write("fraction of wavelength\n\n")
if self.errorMsg != None:
self.fileRef.write("%s\n" % self.errorMsg)
else:
self.fileRef.write("Item\t\t\t\tValue\t\tUnits\n")
self.fileRef.write("Wavelength\t\t\t%8.2f\t%s\n" % (dataDict["L"], self.labelUnitDist))
self.fileRef.write("Celerity\t\t\t%8.2f\t%s/s\n" % (dataDict["C"], self.labelUnitDist))
self.fileRef.write("Group speed\t\t\t%8.2f\t%s/s\n" % (dataDict["Cg"], self.labelUnitDist))
self.fileRef.write("Energy density\t\t\t%8.2f\t%s-%s/%s^2\n" %\
(dataDict["E"], self.labelUnitWt, self.labelUnitDist, self.labelUnitDist))
self.fileRef.write("Energy flux\t\t\t%8.2f\t%s-%s/%s-s\n" %\
(dataDict["Ef"], self.labelUnitWt, self.labelUnitDist, self.labelUnitDist))
self.fileRef.write("Ursell number\t\t\t%8.2f\n" % dataDict["Ur"])
self.fileRef.write("Water Surface Elevation\t\t%8.2f\t%s\n" %\
(dataDict["eta"], self.labelUnitDist))
self.fileRef.write("Horz. displacement\t\t%8.2f\t%s\n" % (dataDict["px"], self.labelUnitDist))
self.fileRef.write("Vert. displacement\t\t%8.2f\t%s\n" % (dataDict["py"], self.labelUnitDist))
self.fileRef.write("Horz. velocity\t\t\t%8.2f\t%s/s\n" % (dataDict["u"], self.labelUnitDist))
self.fileRef.write("Vert. velocity\t\t\t%8.2f\t%s/s\n" % (dataDict["w"], self.labelUnitDist))
self.fileRef.write("Horz. acceleration\t\t%8.2f\t%s/s^2\n" %\
(dataDict["dudt"], self.labelUnitDist))
self.fileRef.write("Vert. acceleration\t\t%8.2f\t%s/s^2\n" %\
(dataDict["dwdt"], self.labelUnitDist))
self.fileRef.write("Pressure\t\t\t%8.2f\t%s/%s^2\n" %\
(dataDict["pres"], self.labelUnitWt, self.labelUnitDist))
exportData = [dataDict["H"], dataDict["T"], dataDict["d"], dataDict["z"], dataDict["xL"] ]
if self.errorMsg != None:
exportData.append(self.errorMsg)
else:
exportData = exportData + [dataDict["L"], dataDict["C"],\
dataDict["Cg"], dataDict["E"], dataDict["Ef"],dataDict["Ur"], dataDict["eta"],\
dataDict["px"], dataDict["py"], dataDict["u"], dataDict["w"], dataDict["dudt"],\
dataDict["dwdt"], dataDict["pres"]]
self.exporter.writeData(exportData)
# end fileOutputWriteData
driver = LinearWaveTheory()
| gpl-3.0 |
sergeykolychev/mxnet | example/gluon/dcgan.py | 30 | 8796 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
return None
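# visual() below tiles a batch of (N, C, H, W) images into one square grid,
# rescales pixel values to 0-255, flips the channel order for matplotlib and
# saves the composite image under `name`.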
def visual(title, X, name):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:,:,::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2,0,1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf*4) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf*2) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
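# Taken together, the generator maps a (nz, 1, 1) noise vector to an
# nc x 64 x 64 image: each ConvTranspose block doubles the spatial size (after
# the initial 1 -> 4 projection) while reducing the channels from ngf*8 to nc.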
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*2) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*4) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*8) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
# ============printing==============
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label,], [output,])
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
# logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
if iter % 1 == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
if check_point:
netG.save_params(os.path.join(outf,'generator_epoch_%d.params' %epoch))
netD.save_params(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))
netG.save_params(os.path.join(outf, 'generator.params'))
netD.save_params(os.path.join(outf, 'discriminator.params'))
| apache-2.0 |
cdemulde/wwdata | setup.py | 1 | 1774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('./docs/history.rst') as history_file:
history = history_file.read()
requirements = [
'pandas',
'numpy',
'dateutils',
'scipy',
'matplotlib',
'statsmodels',
'xlrd',
#'tkinter'
]
setup_requirements = [
# TODO(cdemulde): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='wwdata',
version='0.2.0',
description="Data analysis package aimed at data obtained in the context of (waste)water",
long_description=readme + '\n\n' + history,
author="Chaim De Mulder",
author_email='[email protected]',
url='https://github.com/UGentBIOMATH/wwdata',
packages=find_packages(include=['wwdata']),
include_package_data=True,
install_requires=requirements,
license="GNU General Public License v3",
zip_safe=False,
keywords='wwdata',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=requirements,
setup_requires=setup_requirements,
)
| agpl-3.0 |
jzt5132/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
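# clf.oob_improvement_[i] is the improvement in loss on the out-of-bag samples
# at iteration i, so the negative cumulative sum tracks the OOB loss curve (up
# to a constant offset) and can be compared with the test and CV loss curves.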
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam | PyFoam/Basics/MatplotlibTimelines.py | 2 | 6466 | # ICE Revision: $Id$
"""Plots a collection of timelines"""
from PyFoam.Error import warning,error
from PyFoam.Basics.CustomPlotInfo import readCustomPlotInfo,CustomPlotInfo
from .GeneralPlotTimelines import GeneralPlotTimelines
from platform import uname
firstTimeImport=True
class MatplotlibTimelines(GeneralPlotTimelines):
"""This class opens a matplotlib window and plots a timelines-collection in it"""
figureNr=1
def __init__(self,
timelines,
custom,
showWindow=True,
registry=None):
"""@param timelines: The timelines object
@type timelines: TimeLineCollection
@param custom: A CustomplotInfo-object. Values in this object usually override the
other options
"""
self.hasSubplotHost=True
try:
global plt,matplotlib,firstTimeImport,SubplotHost
import matplotlib
if not showWindow and firstTimeImport:
# matplotlib.use("MacOSX")
matplotlib.use("agg")
firstTimeImport=False
import matplotlib.pyplot as plt
try:
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
except ImportError:
self.hasSubplotHost=False
warning("Matplotlib-Version does not support SubplotHost")
except ImportError:
error("Matplotlib not installed.")
GeneralPlotTimelines.__init__(self,timelines,custom,showWindow=showWindow,registry=registry)
self.figNr=MatplotlibTimelines.figureNr
MatplotlibTimelines.figureNr+=1
self.figure=None
self.title=""
self.xlabel=""
self.ylabel=""
self.ylabel2=""
try:
if self.spec.xlabel:
self.setXLabel(self.spec.xlabel)
except AttributeError:
pass
try:
if self.spec.ylabel:
self.setYLabel(self.spec.ylabel)
except AttributeError:
pass
try:
if self.spec.y2label:
self.setYLabel2(self.spec.y2label)
except AttributeError:
pass
self.axis1=None
self.axis2=None
self.setTitle(self.spec.theTitle)
self.with_=self.spec.with_
if not self.with_ in ['lines','points','dots','steps','linespoints']:
warning("'with'-style",self.with_,"not implemented, using 'lines'")
self.with_='lines'
self.redo()
def buildData(self,times,name,title,lastValid):
"""Build the implementation specific data
@param times: The vector of times for which data exists
@param name: the name under which the data is stored in the timeline
@param title: the title under which this will be displayed"""
a=self.axis1
if self.testAlternate(name):
a=self.axis2
data=self.data.getValues(name)
tm=times
if len(tm)>0 and not lastValid:
tm=tm[:-1]
data=data[:-1]
plotIt=True
try:
if self.spec.logscale and min(data)<=0:
plotIt=False
except AttributeError:
pass
if self.spec.start!=None or self.spec.end!=None:
start=self.spec.start
end=self.spec.end
if start==None:
start=tm[0]
if end==None:
end=tm[-1]
self.axis1.set_xbound(lower=start,upper=end)
self.axis1.set_autoscalex_on(False)
if self.axis2:
self.axis2.set_xbound(lower=start,upper=end)
self.axis2.set_autoscalex_on(False)
drawstyle='default'
marker=''
linestyle='-'
if self.with_=='lines':
pass
elif self.with_=='steps':
drawstyle='steps'
elif self.with_=='points':
linestyle=''
marker='*'
elif self.with_=='dots':
linestyle=''
marker='.'
elif self.with_=='linespoints':
marker='*'
else:
warning("'with'-style",self.with_,"not implemented, using 'lines'")
if plotIt:
a.plot(tm,
data,
label=title,
drawstyle=drawstyle,
marker=marker,
linestyle=linestyle)
def preparePlot(self):
"""Prepare the plotting window"""
plt.hot()
self.figure=plt.figure(self.figNr)
self.figure.clear()
# this is black magic that makes the legend work with two axes
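        # SubplotHost comes from matplotlib's parasite-axes toolkit: lines drawn
        # on the twinned axis stay registered with the host axes, so a single
        # legend() call on the host can list curves from both y-axes.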
if self.hasSubplotHost:
self.axis1=SubplotHost(self.figure,111)
self.figure.add_subplot(self.axis1)
else:
self.axis1=self.figure.add_subplot(111)
self.axis1.set_xlabel(self.xlabel)
self.axis1.set_ylabel(self.ylabel)
if len(self.alternate)>0:
self.axis2=self.axis1.twinx()
self.axis2.set_ylabel(self.ylabel2)
try:
if self.spec.logscale:
self.axis1.set_yscale("log")
if self.axis2:
self.axis2.set_yscale("log")
except AttributeError:
pass
def doReplot(self):
"""Replot the whole data"""
if self.hasSubplotHost:
l=self.axis1.legend(fancybox=True)
else:
l=plt.legend(fancybox=True)
# l.get_frame().set_fill(False)
if l:
l.get_frame().set_alpha(0.7)
l.get_texts()[0].set_size(10)
plt.suptitle(self.title)
plt.grid(True)
plt.draw()
# plt.show()
def actualSetTitle(self,title):
"""Sets the title"""
self.title=title
def setXLabel(self,title):
"""Sets the label on the X-Axis"""
self.xlabel=title
def setYLabel(self,title):
"""Sets the label on the first Y-Axis"""
self.ylabel=title
def setYLabel2(self,title):
"""Sets the label on the second Y-Axis"""
self.ylabel2=title
def doHardcopy(self,filename,form):
"""Write the contents of the plot to disk
@param filename: Name of the file without type extension
@param form: String describing the format"""
self.figure.savefig(filename+"."+form,format=form)
# Should work with Python3 and Python2
| gpl-2.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/json/test_json_norm.py | 2 | 7841 | import nose
from pandas import DataFrame
import numpy as np
import pandas.util.testing as tm
from pandas.io.json import json_normalize, nested_to_record
def _assert_equal_data(left, right):
if not left.columns.equals(right.columns):
left = left.reindex(columns=right.columns)
tm.assert_frame_equal(left, right)
class TestJSONNormalize(tm.TestCase):
def setUp(self):
self.state_data = [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
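    # Illustrative sketch (not part of the original test suite): with the
    # state_data fixture above, json_normalize(self.state_data, 'counties',
    # meta='state') should yield a five-row frame roughly of the form
    #
    #     name        population  state
    #     Dade        12345       Florida
    #     Broward     40000       Florida
    #     Palm Beach  60000       Florida
    #     Summit      1234        Ohio
    #     Cuyahoga    1337        Ohio
    #
    # which is exactly what test_simple_normalize below constructs as its
    # expected result.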
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties', meta='state')
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self):
data = [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
result = json_normalize(data, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
self.assertRaises(ValueError, json_normalize, data,
'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
self.assertTrue(val in result)
def test_record_prefix(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(tm.TestCase):
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
dict(flat1=3, flat2=4),
]
result = nested_to_record(recs)
expected = recs
self.assertEqual(result, expected)
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
self.assertEqual(result, expected)
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2),
nested=dict(e=dict(c=1, d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
self.assertEqual(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',
'--pdb-failure', '-s'], exit=False)
| mit |
joernhees/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
kazemakase/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of a simple linear regression, which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
aschleg/mathpy | mathpy/stats/hypothesis.py | 1 | 10914 | # encoding=utf8
import numpy as np
import pandas as pd
from collections import namedtuple
from scipy.stats import t, norm, rankdata
from mathpy.stats.summary import var
def ttest(y1, y2=None, mu=None, var_equal=False):
r"""
Performs one and two-sample t-tests.
Parameters
----------
y1
First sample to test
y2
Second sample. Optional
mu
Optional, sets the mean for comparison in the one sample t-test. Default 0.
var_equal
Optional, default False. If False, Welch's t-test for unequal variance and
sample sizes is used. If True, equal variance between samples is assumed
and Student's t-test is used.
Returns
-------
namedtuple
Namedtuple containing following values:
t-value
degrees of freedom
p-value
confidence intervals
sample means
Notes
-----
    Welch's t-test is an adaptation of Student's t-test and performs better when the
    sample variances and sizes are unequal. The test still depends on the assumption of
the underlying population distributions being normally distributed.
Welch's t test is defined as:
.. math::
t = \frac{\bar{X_1} - \bar{X_2}}{\sqrt{\frac{s_{1}^{2}}{N_1} + \frac{s_{2}^{2}}{N_2}}}
where:
:math:`\bar{X}` is the sample mean, :math:`s^2` is the sample variance, :math:`n` is the sample size
If the :code:`var_equal` argument is True, Student's t-test is used, which assumes the two samples
have equal variance. The t statistic is computed as:
.. math::
        t = \frac{\bar{X}_1 - \bar{X}_2}{s_p \sqrt{\frac{1}{n_1} + \frac{1}{n_2}}}
where:
.. math::
        s_p = \sqrt{\frac{(n_1 - 1)s^2_{X_1} + (n_2 - 1)s^2_{X_2}}{n_1 + n_2 - 2}}
References
----------
Rencher, A. C., & Christensen, W. F. (2012). Methods of multivariate analysis (3rd Edition).
Student's t-test. (2017, June 20). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Student%27s_t-test&oldid=786562367
"""
if isinstance(y1, (pd.DataFrame, pd.Series)):
y1 = y1.values.squeeze()
elif isinstance(y1, np.ndarray) is False:
y1 = np.array(y1)
if y1.ndim != 1:
raise ValueError('y must be one-dimensional')
n1 = y1.shape[0]
s1 = var(y1)
ybar1 = np.mean(y1)
if y2 is not None:
if isinstance(y2, (pd.DataFrame, pd.Series)):
y2 = y2.values.squeeze()
elif isinstance(y2, np.ndarray) is False:
y2 = np.array(y2)
if y2.ndim != 1:
raise ValueError('y must be one-dimensional')
n2 = y2.shape[0]
s2 = var(y2)
ybar2 = np.mean(y2)
if var_equal is False:
tval = float((ybar1 - ybar2) / np.sqrt(s1 / n1 + s2 / n2))
else:
sp = np.sqrt(((n1 - 1.) * s1 + (n2 - 1.) * s2) / (n1 + n2 - 2.))
tval = float((ybar1 - ybar2) / (sp * np.sqrt(1. / n1 + 1. / n2)))
else:
ybar2, n2, s2 = 0.0, 1.0, 0.0
if mu is None:
mu = 0.0
tval = float((ybar1 - mu) / np.sqrt(s1 / n1))
dof = degrees_of_freedom(y1, y2)
pvalue = _student_t_pvalue(np.absolute(tval), dof)
intervals = _t_conf_int((ybar1, n1, s1), dof=dof, y=(ybar2, n2, s2))
if y2 is not None:
tTestResult = namedtuple('tTestResult', ['tvalue', 'dof', 'pvalue', 'confint', 'x_mean', 'y_mean'])
tt = tTestResult(tvalue=tval, dof=dof, pvalue=pvalue, confint=intervals, x_mean=ybar1, y_mean=ybar2)
else:
tTestResult = namedtuple('tTestResult', ['tvalue', 'dof', 'pvalue', 'confint', 'x_mean'])
tt = tTestResult(tvalue=tval, dof=dof, pvalue=pvalue, confint=intervals, x_mean=ybar1)
return tt
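# Illustrative usage sketch (not part of the original module); the sample
# values below are invented purely for demonstration:
#
#   >>> a = [20.1, 22.3, 19.8, 21.5, 20.9, 22.0]
#   >>> b = [18.2, 19.5, 17.9, 18.8, 19.1, 18.5]
#   >>> res = ttest(a, b)                # Welch's t-test (var_equal=False)
#   >>> res.tvalue, res.dof, res.pvalue  # fields of the returned namedtuple
#   >>> ttest(a, mu=20.0)                # one-sample test against mu = 20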
def mann_whitney(y1, y2, continuity=True):
r"""
Performs the nonparametric Mann-Whitney U test of two independent sample groups.
Parameters
----------
y1
One-dimensional array-like (Pandas Series or DataFrame, Numpy array, or list)
designating first sample
y2
One-dimensional array-like (Pandas Series or DataFrame, Numpy array, or list)
designating second sample to compare to first
continuity
Boolean, optional. If True, apply the continuity correction of :math:`\frac{1}{2}` to the
mean rank.
Returns
-------
namedtuple
Namedtuple of following entries that contain resulting Mann-Whitney test statistics.
Mann-Whitney U Test Statistic: The U Statistic of the Mann-Whitney test
Mean Rank: The mean rank of U statistic
Sigma: the standard deviation of U
z-value: The standardized value of U
p-value: p-value of U statistic compared to critical value
Notes
-----
The Mann-Whitney U test is a nonparametric hypothesis test that tests the null hypothesis that
there is an equally likely chance that a randomly selected observation from one sample will be
less than or greater than a randomly selected observation from a second sample. Nonparametric
methods are so named since they do not rely on the assumption of normality of the data.
The test statistic in the Mann-Whitney setting is denoted as :math:`U` and is the minimum of
the summed ranks of the two samples. The null hypothesis is rejected if :math:`U \leq U_0`,
where :math:`U_0` is found in a table for small sample sizes. For larger sample sizes,
:math:`U` is approximately normally distributed.
The test is nonparametric in the sense it uses the ranks of the values rather than the values
themselves. Therefore, the values are ordered then ranked from 1 (smallest value) to the largest
value. Ranks of tied values get the mean of the ranks the values would have received. For example,
for a set of data points :math:`\{4, 7, 7, 8\}` the ranks are :math:`\{1, 2.5, 2.5, 4\}`. The
    :math:`2.5` rank comes from :math:`(2 + 3) / 2 = 2.5`. The ranks are then added for the values of
    both samples. The sum of the ranks for each sample is typically denoted by :math:`R_k`, where
    :math:`k` is a sample indicator.
    :math:`U` for the two samples in the test is given by:
    .. math::
        U_1 = n_1 n_2 + \frac{n_1(n_1 + 1)}{2} - R_1, \qquad U_2 = n_1 n_2 - U_1
    and the test statistic is the smaller of the two, :math:`U = \min(U_1, U_2)`.
References
----------
Mann–Whitney U test. (2017, June 20). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Mann%E2%80%93Whitney_U_test&oldid=786593885
"""
if isinstance(y1, (pd.DataFrame, pd.Series)):
y11 = y1.values.squeeze()
elif isinstance(y1, np.ndarray) is False:
y11 = np.array(y1)
else:
y11 = np.copy(y1)
if isinstance(y2, (pd.DataFrame, pd.Series)):
y22 = y2.values.squeeze()
elif isinstance(y1, np.ndarray) is False:
y22 = np.array(y2)
else:
y22 = y2.copy()
n1, n2 = y11.shape[0], y22.shape[0]
ranks = np.concatenate((y11, y22))
ranks = rankdata(ranks, 'average')
ranks = ranks[:n1]
n = n1 + n2
u1 = n1 * n2 + (n1 * (n1 + 1)) / 2. - np.sum(ranks)
u2 = n1 * n2 - u1
u = np.minimum(u1, u2)
mu = (n1 * n2) / 2. + (0.5 * continuity)
rankcounts = np.unique(ranks, return_counts=True)[1]
sigma = np.sqrt(((n1 * n2) * (n + 1)) / 12. * (1 - np.sum(rankcounts ** 3 - rankcounts) / float(n ** 3 - n)))
z = (np.absolute(u - mu)) / sigma
p = 1 - norm.cdf(z)
MannWhitneyResult = namedtuple('MannWhitneyResult', ['u', 'meanrank', 'sigma', 'zvalue', 'pvalue'])
mwr = MannWhitneyResult(u=u, meanrank=mu, sigma=sigma, zvalue=z, pvalue=p)
return mwr
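# Illustrative usage sketch (not part of the original module), reusing the
# invented samples from the ttest example above:
#
#   >>> mw = mann_whitney(a, b)
#   >>> mw.u, mw.zvalue, mw.pvalue     # U statistic and its normal approximation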
def degrees_of_freedom(y1, y2=None, var_equal=False):
r"""
Computes the degrees of freedom of one or two samples.
Parameters
----------
y1 : array-like
First sample to test. Must be one-dimensional
y2 : array-like, optional
Second sample. Must be one-dimensional
var_equal
Optional, default False. If False, Welch's t-test for unequal variance and
sample sizes is used. If True, equal variance between samples is assumed
and Student's t-test is used.
Returns
-------
float
the degrees of freedom
Notes
-----
When Welch's t test is used, the Welch-Satterthwaite equation for approximating the degrees
of freedom should be used and is defined as:
.. math::
\large v \approx \frac{\left(\frac{s_{1}^2}{N_1} +
        \frac{s_{2}^2}{N_2}\right)^2}{\frac{\left(\frac{s_1^2}{N_1}\right)^2}{v_1} +
        \frac{\left(\frac{s_2^2}{N_2}\right)^2}{v_2}}
If the two samples are assumed to have equal variance, the degrees of freedoms become simply:
.. math::
v = n_1 + n_2 - 2
In the case of one sample, the degrees of freedom are:
.. math::
v = n - 1
References
----------
Rencher, A. C., & Christensen, W. F. (2012). Methods of multivariate analysis (3rd Edition).
Welch's t-test. (2017, June 16). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Welch%27s_t-test&oldid=785961228
"""
if isinstance(y1, (pd.DataFrame, pd.Series)):
y1 = y1.values.squeeze()
else:
y1 = np.array(y1)
if y1.ndim != 1:
raise ValueError('y must be one-dimensional')
n1 = y1.shape[0]
s1 = var(y1)
v1 = n1 - 1
if y2 is not None:
if isinstance(y2, (pd.DataFrame, pd.Series)):
y2 = y2.values.squeeze()
elif isinstance(y2, np.ndarray) is False:
y2 = np.array(y2)
if y2.ndim != 1:
raise ValueError('y must be one-dimensional')
n2 = y2.shape[0]
s2 = var(y2)
v2 = n2 - 1
if var_equal is False:
v = np.power((s1 / n1 + s2 / n2), 2) / (np.power((s1 / n1), 2) / v1 + np.power((s2 / n2), 2) / v2)
else:
v = n1 + n2 - 2
else:
v = v1
return float(v)
def _t_conf_int(x, dof, y=None):
xbar, xn, xvar = x[0], x[1], x[2]
if y is not None:
ybar, yn, yvar = y[0], y[1], y[2]
low_interval = (xbar - ybar) + t.ppf(0.025, dof) * np.sqrt(xvar / xn + yvar / yn)
high_interval = (xbar - ybar) - t.ppf(0.025, dof) * np.sqrt(xvar / xn + yvar / yn)
else:
low_interval = xbar + 1.96 * np.sqrt((xbar * (1 - xbar)) / xn)
high_interval = xbar - 1.96 * np.sqrt((xbar * (1 - xbar)) / xn)
return float(low_interval), float(high_interval)
def _student_t_pvalue(n, dof, test='two-tail'):
p = (1. - t.cdf(n, dof))
if test == 'two-tail':
p *= 2.
return p
| mit |
AIML/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
ueshin/apache-spark | python/pyspark/pandas/config.py | 14 | 15723 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Infrastructure of options for pandas-on-Spark.
"""
from contextlib import contextmanager
import json
from typing import Any, Callable, Dict, Iterator, List, Tuple, Union # noqa: F401 (SPARK-34943)
from pyspark._globals import _NoValue, _NoValueType
from pyspark.pandas.utils import default_session
__all__ = ["get_option", "set_option", "reset_option", "options", "option_context"]
class Option:
"""
Option class that defines an option with related properties.
    This class holds all information relevant to a single option. Its
    instance can also validate whether a given value is acceptable.
It is currently for internal usage only.
Parameters
----------
key: str, keyword-only argument
the option name to use.
doc: str, keyword-only argument
the documentation for the current option.
default: Any, keyword-only argument
default value for this option.
types: Union[Tuple[type, ...], type], keyword-only argument
default is str. It defines the expected types for this option. It is
used with `isinstance` to validate the given value to this option.
check_func: Tuple[Callable[[Any], bool], str], keyword-only argument
        default is a function that always returns `True` with an empty string.
It defines:
- a function to check the given value to this option
- the error message to show when this check is failed
When new value is set to this option, this function is called to check
if the given value is valid.
Examples
--------
>>> option = Option(
... key='option.name',
... doc="this is a test option",
... default="default",
... types=(float, int),
... check_func=(lambda v: v > 0, "should be a positive float"))
>>> option.validate('abc') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: The value for option 'option.name' was <class 'str'>;
however, expected types are [(<class 'float'>, <class 'int'>)].
>>> option.validate(-1.1)
Traceback (most recent call last):
...
ValueError: should be a positive float
>>> option.validate(1.1)
"""
def __init__(
self,
*,
key: str,
doc: str,
default: Any,
types: Union[Tuple[type, ...], type] = str,
check_func: Tuple[Callable[[Any], bool], str] = (lambda v: True, "")
):
self.key = key
self.doc = doc
self.default = default
self.types = types
self.check_func = check_func
def validate(self, v: Any) -> None:
"""
Validate the given value and throw an exception with related information such as key.
"""
if not isinstance(v, self.types):
raise TypeError(
"The value for option '%s' was %s; however, expected types are "
"[%s]." % (self.key, type(v), str(self.types))
)
if not self.check_func[0](v):
raise ValueError(self.check_func[1])
# Available options.
#
# NOTE: if you are fixing or adding an option here, make sure you execute `show_options()` and
# copy & paste the results into show_options 'docs/source/user_guide/options.rst' as well.
# See the examples below:
# >>> from pyspark.pandas.config import show_options
# >>> show_options()
_options = [
Option(
key="display.max_rows",
doc=(
"This sets the maximum number of rows pandas-on-Spark should output when printing out "
"various output. For example, this value determines the number of rows to be "
"shown at the repr() in a dataframe. Set `None` to unlimit the input length. "
"Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'display.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.max_rows",
doc=(
"'compute.max_rows' sets the limit of the current pandas-on-Spark DataFrame. "
"Set `None` to unlimit the input length. When the limit is set, it is executed "
"by the shortcut by collecting the data into the driver, and then using the pandas "
"API. If the limit is unset, the operation is executed by PySpark. Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'compute.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.shortcut_limit",
doc=(
"'compute.shortcut_limit' sets the limit for a shortcut. "
"It computes specified number of rows and use its schema. When the dataframe "
"length is larger than this limit, pandas-on-Spark uses PySpark to compute."
),
default=1000,
types=int,
check_func=(
lambda v: v >= 0,
"'compute.shortcut_limit' should be greater than or equal to 0.",
),
),
Option(
key="compute.ops_on_diff_frames",
doc=(
"This determines whether or not to operate between two different dataframes. "
"For example, 'combine_frames' function internally performs a join operation which "
"can be expensive in general. So, if `compute.ops_on_diff_frames` variable is not "
"True, that method throws an exception."
),
default=False,
types=bool,
),
Option(
key="compute.default_index_type",
doc=("This sets the default index type: sequence, distributed and distributed-sequence."),
default="sequence",
types=str,
check_func=(
lambda v: v in ("sequence", "distributed", "distributed-sequence"),
"Index type should be one of 'sequence', 'distributed', 'distributed-sequence'.",
),
),
Option(
key="compute.ordered_head",
doc=(
"'compute.ordered_head' sets whether or not to operate head with natural ordering. "
"pandas-on-Spark does not guarantee the row ordering so `head` could return some "
"rows from distributed partitions. If 'compute.ordered_head' is set to True, "
"pandas-on-Spark performs natural ordering beforehand, but it will cause a "
"performance overhead."
),
default=False,
types=bool,
),
Option(
key="plotting.max_rows",
doc=(
"'plotting.max_rows' sets the visual limit on top-n-based plots such as `plot.bar` "
"and `plot.pie`. If it is set to 1000, the first 1000 data points will be used "
"for plotting. Default is 1000."
),
default=1000,
types=int,
check_func=(
            lambda v: v >= 0,
"'plotting.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="plotting.sample_ratio",
doc=(
"'plotting.sample_ratio' sets the proportion of data that will be plotted for sample-"
"based plots such as `plot.line` and `plot.area`. "
"This option defaults to 'plotting.max_rows' option."
),
default=None,
types=(float, type(None)),
check_func=(
lambda v: v is None or 1 >= v >= 0,
"'plotting.sample_ratio' should be 1.0 >= value >= 0.0.",
),
),
Option(
key="plotting.backend",
doc=(
"Backend to use for plotting. Default is plotly. "
"Supports any package that has a top-level `.plot` method. "
"Known options are: [matplotlib, plotly]."
),
default="plotly",
types=str,
),
] # type: List[Option]
_options_dict = dict(zip((option.key for option in _options), _options)) # type: Dict[str, Option]
_key_format = "pandas_on_Spark.{}".format
class OptionError(AttributeError, KeyError):
pass
def show_options() -> None:
"""
Make a pretty table that can be copied and pasted into public documentation.
This is currently for an internal purpose.
Examples
--------
>>> show_options() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
================... =======... =====================...
Option Default Description
================... =======... =====================...
display.max_rows 1000 This sets the maximum...
...
================... =======... =====================...
"""
import textwrap
header = ["Option", "Default", "Description"]
row_format = "{:<31} {:<14} {:<53}"
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
print(row_format.format(*header))
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
for option in _options:
doc = textwrap.fill(option.doc, 53)
formatted = "".join([line + "\n" + (" " * 47) for line in doc.split("\n")]).rstrip()
print(row_format.format(option.key, repr(option.default), formatted))
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
def get_option(key: str, default: Union[Any, _NoValueType] = _NoValue) -> Any:
"""
Retrieves the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
default : object
The default value if the option is not set yet. The value should be JSON serializable.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists and the default is not provided
"""
_check_option(key)
if default is _NoValue:
default = _options_dict[key].default
_options_dict[key].validate(default)
return json.loads(default_session().conf.get(_key_format(key), default=json.dumps(default)))
def set_option(key: str, value: Any) -> None:
"""
Sets the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
value : object
New value of option. The value should be JSON serializable.
Returns
-------
None
"""
_check_option(key)
_options_dict[key].validate(value)
default_session().conf.set(_key_format(key), json.dumps(value))
def reset_option(key: str) -> None:
"""
Reset one option to their default value.
Pass "all" as argument to reset all options.
Parameters
----------
key : str
If specified only option will be reset.
Returns
-------
None
"""
_check_option(key)
default_session().conf.unset(_key_format(key))
@contextmanager
def option_context(*args: Any) -> Iterator[None]:
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'compute.max_rows', 5):
... print(get_option('display.max_rows'), get_option('compute.max_rows'))
10 5
>>> print(get_option('display.max_rows'), get_option('compute.max_rows'))
1000 1000
"""
if len(args) == 0 or len(args) % 2 != 0:
raise ValueError("Need to invoke as option_context(pat, val, [(pat, val), ...]).")
opts = dict(zip(args[::2], args[1::2]))
orig_opts = {key: get_option(key) for key in opts}
try:
for key, value in opts.items():
set_option(key, value)
yield
finally:
for key, value in orig_opts.items():
set_option(key, value)
def _check_option(key: str) -> None:
if key not in _options_dict:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
class DictWrapper:
"""provide attribute-style access to a nested dict"""
def __init__(self, d: Dict[str, Option], prefix: str = ""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key: str, val: Any) -> None:
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
set_option(canonical_key, val)
else:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
def __getattr__(self, key: str) -> Union["DictWrapper", Any]:
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
return get_option(canonical_key)
elif len(candidates) == 0:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
else:
return DictWrapper(d, canonical_key)
def __dir__(self) -> List[str]:
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix == "":
candidates = d.keys()
offset = 0
else:
candidates = [k for k in d.keys() if all(x in k.split(".") for x in prefix.split("."))]
offset = len(prefix) + 1 # prefix (e.g. "compute.") to trim.
return [c[offset:] for c in candidates]
options = DictWrapper(_options_dict)
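# Example (a sketch mirroring the doctests above): the wrapper makes the two
# statements below equivalent, since attribute access on `options` resolves to
# get_option/set_option on the matching key.
#
#   >>> set_option("display.max_rows", 100)
#   >>> options.display.max_rows = 100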
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.config
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.config.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.config tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.config,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
cheeseywhiz/cheeseywhiz | latex/apb_sea_monkey_report/sim.py | 1 | 3497 | import dataclasses
import enum
import functools
import multiprocessing
import pprint
import random
import time
from matplotlib import pyplot as plt
import numpy as np
N_CONTROL = 40
N_CONTROL_SWIM = 14
N_TEST = 84
N_TEST_SWIM = 0
SAMPLE_SIZE = 20
N_SAMPLES = 10_000
def timeit(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
t_0 = time.perf_counter()
try:
value = func(*args, **kwargs)
finally:
t_1 = time.perf_counter()
print(f'{func.__name__} execution time {t_1 - t_0} seconds')
return value
return wrapped
Treatment = enum.Enum('Treatment', 'control test')
@dataclasses.dataclass
class SeaMonkey:
treatment: Treatment
is_swimming: bool
def do_experiment():
return [
*([SeaMonkey(Treatment.control, True)] * N_CONTROL_SWIM),
*([SeaMonkey(Treatment.control, False)] * (N_CONTROL
- N_CONTROL_SWIM)),
*([SeaMonkey(Treatment.test, True)] * N_TEST_SWIM),
*([SeaMonkey(Treatment.test, False)] * (N_TEST - N_TEST_SWIM)),
]
def get_observations(sample):
control_swim = 0
control_not_swim = 0
test_swim = 0
test_not_swim = 0
for sea_monkey in sample:
if sea_monkey.treatment is Treatment.control:
if sea_monkey.is_swimming:
control_swim += 1
else:
control_not_swim += 1
elif sea_monkey.treatment is Treatment.test:
if sea_monkey.is_swimming:
test_swim += 1
else:
test_not_swim += 1
return np.array([
[control_swim, control_not_swim],
[test_swim, test_not_swim],
])
def table_sums(table):
row_sums = [sum(row) for row in table]
column_sums = [
sum(row[i] for row in table)
for i in range(table.shape[1])]
return row_sums, column_sums, table.sum()
def get_expected(observations):
row_sums, column_sums, table_sum = table_sums(observations)
expected = np.empty(observations.shape)
for i, row in enumerate(observations):
for j, _ in enumerate(row):
row_sum = row_sums[i]
column_sum = column_sums[j]
expected[i][j] = row_sum * column_sum / table_sum
return expected
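# Note: each expected cell count above is (row total * column total) / grand
# total, and get_chisq_gen below turns a sample's observed and expected tables
# into the usual chi-square statistic, sum((observed - expected)**2 / expected).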
def get_chisq_gen(population):
while True:
sample = random.sample(population, SAMPLE_SIZE)
observations = get_observations(sample)
expected = get_expected(observations)
if expected.all():
yield (((observations - expected) ** 2) / expected).sum()
@timeit
def get_chisqs(population):
chisq_gen = get_chisq_gen(population)
return np.array([
next(chisq_gen)
for _ in range(N_SAMPLES)
])
def get_chisqs_from_experiment():
population = do_experiment()
return get_chisqs(population)
def describe(data):
five_num_sum = np.quantile(data, [0, 1 / 4, 1 / 2, 3 / 4, 1])
return {
'minimum': five_num_sum[0],
'1st quartile': five_num_sum[1],
'median': five_num_sum[2],
'3rd quartile': five_num_sum[3],
'maximum': five_num_sum[4],
'mean': data.mean(),
'standard deviation': data.std(),
        'length': data.size,
}
def analyze(data):
pprint.pprint(describe(data))
plt.hist(data)
plt.show()
def main():
chisqs = get_chisqs_from_experiment()
analyze(chisqs)
if __name__ == '__main__':
main()
| mit |
Yiangos01/ADE2017 | testingCleansing.py | 1 | 21082 | # -*- coding: utf-8 -*-
import nltk
import string
import csv
# coding=utf-8
import math, re, string, requests, json
from itertools import product
from inspect import getsourcefile
from os.path import abspath, join, dirname
import pandas as pd
import re
import string
##Constants##
# (empirically derived mean sentiment intensity rating increase for booster words)
B_INCR = 0.293
B_DECR = -0.293
# (empirically derived mean sentiment intensity rating increase for using
# ALLCAPs to emphasize a word)
C_INCR = 0.733
N_SCALAR = -0.74
# for removing punctuation
REGEX_REMOVE_PUNCTUATION = re.compile('[%s]' % re.escape(string.punctuation))
PUNC_LIST = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
"!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]
NEGATE = \
["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
"ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
"dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
"don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
"neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
"oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
"oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
"without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]
# booster/dampener 'intensifiers' or 'degree adverbs'
# http://en.wiktionary.org/wiki/Category:English_degree_adverbs
BOOSTER_DICT = \
{"absolutely": B_INCR, "amazingly": B_INCR, "awfully": B_INCR, "completely": B_INCR, "considerably": B_INCR,
"decidedly": B_INCR, "deeply": B_INCR, "effing": B_INCR, "enormously": B_INCR,
"entirely": B_INCR, "especially": B_INCR, "exceptionally": B_INCR, "extremely": B_INCR,
"fabulously": B_INCR, "flipping": B_INCR, "flippin": B_INCR,
"fricking": B_INCR, "frickin": B_INCR, "frigging": B_INCR, "friggin": B_INCR, "fully": B_INCR, "fucking": B_INCR,
"greatly": B_INCR, "hella": B_INCR, "highly": B_INCR, "hugely": B_INCR, "incredibly": B_INCR,
"intensely": B_INCR, "majorly": B_INCR, "more": B_INCR, "most": B_INCR, "particularly": B_INCR,
"purely": B_INCR, "quite": B_INCR, "really": B_INCR, "remarkably": B_INCR,
"so": B_INCR, "substantially": B_INCR,
"thoroughly": B_INCR, "totally": B_INCR, "tremendously": B_INCR,
"uber": B_INCR, "unbelievably": B_INCR, "unusually": B_INCR, "utterly": B_INCR,
"very": B_INCR,
"almost": B_DECR, "barely": B_DECR, "hardly": B_DECR, "just enough": B_DECR,
"kind of": B_DECR, "kinda": B_DECR, "kindof": B_DECR, "kind-of": B_DECR,
"less": B_DECR, "little": B_DECR, "marginally": B_DECR, "occasionally": B_DECR, "partly": B_DECR,
"scarcely": B_DECR, "slightly": B_DECR, "somewhat": B_DECR,
"sort of": B_DECR, "sorta": B_DECR, "sortof": B_DECR, "sort-of": B_DECR}
# check for special case idioms using a sentiment-laden keyword known to VADER
SPECIAL_CASE_IDIOMS = {"the shit": 3, "the bomb": 3, "bad ass": 1.5, "yeah right": -2,
"cut the mustard": 2, "kiss of death": -1.5, "hand to mouth": -2}
##Static methods##
def negated(input_words, include_nt=True):
"""
Determine if input contains negation words
"""
neg_words = []
neg_words.extend(NEGATE)
for word in neg_words:
if word in input_words:
return True
if include_nt:
for word in input_words:
if "n't" in word:
return True
if "least" in input_words:
i = input_words.index("least")
if i > 0 and input_words[i-1] != "at":
return True
return False
def normalize(score, alpha=15):
"""
Normalize the score to be between -1 and 1 using an alpha that
approximates the max expected value
"""
norm_score = score/math.sqrt((score*score) + alpha)
if norm_score < -1.0:
return -1.0
elif norm_score > 1.0:
return 1.0
else:
return norm_score
def allcap_differential(words):
"""
Check whether just some words in the input are ALL CAPS
:param list words: The words to inspect
:returns: `True` if some but not all items in `words` are ALL CAPS
"""
is_different = False
allcap_words = 0
for word in words:
if word.isupper():
allcap_words += 1
cap_differential = len(words) - allcap_words
if cap_differential > 0 and cap_differential < len(words):
is_different = True
return is_different
def scalar_inc_dec(word, valence, is_cap_diff):
"""
Check if the preceding words increase, decrease, or negate/nullify the
valence
"""
scalar = 0.0
word_lower = word.lower()
if word_lower in BOOSTER_DICT:
scalar = BOOSTER_DICT[word_lower]
if valence < 0:
scalar *= -1
#check if booster/dampener word is in ALLCAPS (while others aren't)
if word.isupper() and is_cap_diff:
if valence > 0:
scalar += C_INCR
else: scalar -= C_INCR
return scalar
class SentiText(object):
"""
Identify sentiment-relevant string-level properties of input text.
"""
def __init__(self, text):
if not isinstance(text, str):
text = str(text.encode('utf-8'))
self.text = text
self.words_and_emoticons = self._words_and_emoticons()
# doesn't separate words from\
# adjacent punctuation (keeps emoticons & contractions)
self.is_cap_diff = allcap_differential(self.words_and_emoticons)
def _words_plus_punc(self):
"""
Returns mapping of form:
{
'cat,': 'cat',
',cat': 'cat',
}
"""
no_punc_text = REGEX_REMOVE_PUNCTUATION.sub('', self.text)
# removes punctuation (but loses emoticons & contractions)
words_only = no_punc_text.split()
# remove singletons
words_only = set( w for w in words_only if len(w) > 1 )
# the product gives ('cat', ',') and (',', 'cat')
punc_before = {''.join(p): p[1] for p in product(PUNC_LIST, words_only)}
punc_after = {''.join(p): p[0] for p in product(words_only, PUNC_LIST)}
words_punc_dict = punc_before
words_punc_dict.update(punc_after)
return words_punc_dict
def _words_and_emoticons(self):
"""
        Removes leading and trailing punctuation
Leaves contractions and most emoticons
Does not preserve punc-plus-letter emoticons (e.g. :D)
"""
wes = self.text.split()
words_punc_dict = self._words_plus_punc()
wes = [we for we in wes if len(we) > 1]
for i, we in enumerate(wes):
if we in words_punc_dict:
wes[i] = words_punc_dict[we]
return wes
class SentimentIntensityAnalyzer(object):
"""
Give a sentiment intensity score to sentences.
"""
def __init__(self, lexicon_file="vader_lexicon.txt"):
_this_module_file_path_ = abspath(getsourcefile(lambda:0))
lexicon_full_filepath = join(dirname(_this_module_file_path_), lexicon_file)
with open(lexicon_full_filepath) as f:
self.lexicon_full_filepath = f.read()
self.lexicon = self.make_lex_dict()
def make_lex_dict(self):
"""
Convert lexicon file to a dictionary
"""
lex_dict = {}
for line in self.lexicon_full_filepath.split('\n'):
(word, measure) = line.strip().split('\t')[0:2]
lex_dict[word] = float(measure)
return lex_dict
def polarity_scores(self, text):
"""
Return a float for sentiment strength based on the input text.
Positive values are positive valence, negative value are negative
valence.
"""
sentitext = SentiText(text)
#text, words_and_emoticons, is_cap_diff = self.preprocess(text)
sentiments = []
words_and_emoticons = sentitext.words_and_emoticons
for item in words_and_emoticons:
valence = 0
i = words_and_emoticons.index(item)
if (i < len(words_and_emoticons) - 1 and item.lower() == "kind" and \
words_and_emoticons[i+1].lower() == "of") or \
item.lower() in BOOSTER_DICT:
sentiments.append(valence)
continue
sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)
sentiments = self._but_check(words_and_emoticons, sentiments)
valence_dict = self.score_valence(sentiments, text)
return valence_dict
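    # Sketch of the returned structure (not part of the original class):
    # polarity_scores() yields a dict of the form
    #   {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...}
    # where 'neg'/'neu'/'pos' are proportions of the text falling in each
    # category and 'compound' is the normalized overall valence in [-1, 1].
    # A common convention from the VADER authors is to treat compound scores
    # >= 0.05 as positive, <= -0.05 as negative, and anything between as neutral.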
def sentiment_valence(self, valence, sentitext, item, i, sentiments):
is_cap_diff = sentitext.is_cap_diff
words_and_emoticons = sentitext.words_and_emoticons
item_lowercase = item.lower()
if item_lowercase in self.lexicon:
#get the sentiment valence
valence = self.lexicon[item_lowercase]
#check if sentiment laden word is in ALL CAPS (while others aren't)
if item.isupper() and is_cap_diff:
if valence > 0:
valence += C_INCR
else:
valence -= C_INCR
for start_i in range(0,3):
if i > start_i and words_and_emoticons[i-(start_i+1)].lower() not in self.lexicon:
# dampen the scalar modifier of preceding words and emoticons
# (excluding the ones that immediately preceed the item) based
# on their distance from the current item.
s = scalar_inc_dec(words_and_emoticons[i-(start_i+1)], valence, is_cap_diff)
if start_i == 1 and s != 0:
s = s*0.95
if start_i == 2 and s != 0:
s = s*0.9
valence = valence+s
valence = self._never_check(valence, words_and_emoticons, start_i, i)
if start_i == 2:
valence = self._idioms_check(valence, words_and_emoticons, i)
# future work: consider other sentiment-laden idioms
# other_idioms =
# {"back handed": -2, "blow smoke": -2, "blowing smoke": -2,
# "upper hand": 1, "break a leg": 2,
# "cooking with gas": 2, "in the black": 2, "in the red": -2,
# "on the ball": 2,"under the weather": -2}
valence = self._least_check(valence, words_and_emoticons, i)
sentiments.append(valence)
return sentiments
def _least_check(self, valence, words_and_emoticons, i):
# check for negation case using "least"
if i > 1 and words_and_emoticons[i-1].lower() not in self.lexicon \
and words_and_emoticons[i-1].lower() == "least":
if words_and_emoticons[i-2].lower() != "at" and words_and_emoticons[i-2].lower() != "very":
valence = valence*N_SCALAR
elif i > 0 and words_and_emoticons[i-1].lower() not in self.lexicon \
and words_and_emoticons[i-1].lower() == "least":
valence = valence*N_SCALAR
return valence
def _but_check(self, words_and_emoticons, sentiments):
# check for modification in sentiment due to contrastive conjunction 'but'
if 'but' in words_and_emoticons or 'BUT' in words_and_emoticons:
try:
bi = words_and_emoticons.index('but')
except ValueError:
bi = words_and_emoticons.index('BUT')
for sentiment in sentiments:
si = sentiments.index(sentiment)
if si < bi:
sentiments.pop(si)
sentiments.insert(si, sentiment*0.5)
elif si > bi:
sentiments.pop(si)
sentiments.insert(si, sentiment*1.5)
return sentiments
def _idioms_check(self, valence, words_and_emoticons, i):
onezero = "{0} {1}".format(words_and_emoticons[i-1], words_and_emoticons[i])
twoonezero = "{0} {1} {2}".format(words_and_emoticons[i-2],
words_and_emoticons[i-1], words_and_emoticons[i])
twoone = "{0} {1}".format(words_and_emoticons[i-2], words_and_emoticons[i-1])
threetwoone = "{0} {1} {2}".format(words_and_emoticons[i-3],
words_and_emoticons[i-2], words_and_emoticons[i-1])
threetwo = "{0} {1}".format(words_and_emoticons[i-3], words_and_emoticons[i-2])
sequences = [onezero, twoonezero, twoone, threetwoone, threetwo]
for seq in sequences:
if seq in SPECIAL_CASE_IDIOMS:
valence = SPECIAL_CASE_IDIOMS[seq]
break
if len(words_and_emoticons)-1 > i:
zeroone = "{0} {1}".format(words_and_emoticons[i], words_and_emoticons[i+1])
if zeroone in SPECIAL_CASE_IDIOMS:
valence = SPECIAL_CASE_IDIOMS[zeroone]
if len(words_and_emoticons)-1 > i+1:
zeroonetwo = "{0} {1} {2}".format(words_and_emoticons[i], words_and_emoticons[i+1], words_and_emoticons[i+2])
if zeroonetwo in SPECIAL_CASE_IDIOMS:
valence = SPECIAL_CASE_IDIOMS[zeroonetwo]
# check for booster/dampener bi-grams such as 'sort of' or 'kind of'
if threetwo in BOOSTER_DICT or twoone in BOOSTER_DICT:
valence = valence+B_DECR
return valence
def _never_check(self, valence, words_and_emoticons, start_i, i):
if start_i == 0:
if negated([words_and_emoticons[i-1]]):
valence = valence*N_SCALAR
if start_i == 1:
if words_and_emoticons[i-2] == "never" and\
(words_and_emoticons[i-1] == "so" or
words_and_emoticons[i-1] == "this"):
valence = valence*1.5
elif negated([words_and_emoticons[i-(start_i+1)]]):
valence = valence*N_SCALAR
if start_i == 2:
if words_and_emoticons[i-3] == "never" and \
(words_and_emoticons[i-2] == "so" or words_and_emoticons[i-2] == "this") or \
(words_and_emoticons[i-1] == "so" or words_and_emoticons[i-1] == "this"):
valence = valence*1.25
elif negated([words_and_emoticons[i-(start_i+1)]]):
valence = valence*N_SCALAR
return valence
def _punctuation_emphasis(self, sum_s, text):
# add emphasis from exclamation points and question marks
ep_amplifier = self._amplify_ep(text)
qm_amplifier = self._amplify_qm(text)
punct_emph_amplifier = ep_amplifier+qm_amplifier
return punct_emph_amplifier
def _amplify_ep(self, text):
# check for added emphasis resulting from exclamation points (up to 4 of them)
ep_count = text.count("!")
if ep_count > 4:
ep_count = 4
# (empirically derived mean sentiment intensity rating increase for
# exclamation points)
ep_amplifier = ep_count*0.292
return ep_amplifier
def _amplify_qm(self, text):
# check for added emphasis resulting from question marks (2 or 3+)
qm_count = text.count("?")
qm_amplifier = 0
if qm_count > 1:
if qm_count <= 3:
# (empirically derived mean sentiment intensity rating increase for
# question marks)
qm_amplifier = qm_count*0.18
else:
qm_amplifier = 0.96
return qm_amplifier
def _sift_sentiment_scores(self, sentiments):
# want separate positive versus negative sentiment scores
pos_sum = 0.0
neg_sum = 0.0
neu_count = 0
for sentiment_score in sentiments:
if sentiment_score > 0:
pos_sum += (float(sentiment_score) +1) # compensates for neutral words that are counted as 1
if sentiment_score < 0:
neg_sum += (float(sentiment_score) -1) # when used with math.fabs(), compensates for neutrals
if sentiment_score == 0:
neu_count += 1
return pos_sum, neg_sum, neu_count
def score_valence(self, sentiments, text):
if sentiments:
sum_s = float(sum(sentiments))
# compute and add emphasis from punctuation in text
punct_emph_amplifier = self._punctuation_emphasis(sum_s, text)
if sum_s > 0:
sum_s += punct_emph_amplifier
elif sum_s < 0:
sum_s -= punct_emph_amplifier
compound = normalize(sum_s)
# discriminate between positive, negative and neutral sentiment scores
pos_sum, neg_sum, neu_count = self._sift_sentiment_scores(sentiments)
if pos_sum > math.fabs(neg_sum):
pos_sum += (punct_emph_amplifier)
elif pos_sum < math.fabs(neg_sum):
neg_sum -= (punct_emph_amplifier)
total = pos_sum + math.fabs(neg_sum) + neu_count
pos = math.fabs(pos_sum / total)
neg = math.fabs(neg_sum / total)
neu = math.fabs(neu_count / total)
else:
compound = 0.0
pos = 0.0
neg = 0.0
neu = 0.0
sentiment_dict = \
{"neg" : round(neg, 3),
"neu" : round(neu, 3),
"pos" : round(pos, 3),
"compound" : round(compound, 4)}
return sentiment_dict
##BeingMaryJane LiveEvents, #BlackAdam news, #CCR17 news,#CookSchoolGBC liveEvents
#['#2Awesome','#bbcaq','#bbcfootball','#BeerBods','#BeingMaryJane','#BernieEcclestone','#Bett2017','#bettchat','#BigWeekend','#BlackAdam','#CCR17',
#'#CheckatradeTrophy','#Concentrix','#CookSchoolGBC'
if __name__ == '__main__':
exclude2=['#4HSepang','#Advtravelconf','#AJBellNBL','#All80sHour','#andersonprog','#arcgap','#archantawards','#BBCIntroducingOnRadioWales','#BestFans2017',
'#bloggersbookfeast','#botchedupbodies','#brainwashingstacey','#BritainsBenefitTenants','#C4PopUp','#CBBStacey','#ccvideo','#cdfcouncil','#cfginvest17',
'#CleggQMUL','#cmawards16','#CookSchoolGBC','#covhour','#creativeshootout17','#CremeEggHuntingSeason','#diaries17','#DisruptionSWT','#diversetv','#DolanTwinsNewVideo',
'#eastawardsni','#ENLScores','#ESRCRacisms','#FashionRules','#Fidelio','#FreedomsRoad','#GetYourTattsOut','#GSFestivals','#heavenclubspa','#homelesssunday',
'#IAmLegend','#ifgdirector','#insecttunage','#InspireMBG2017','#itsgoneviral','#ITVBoxing','#kimcarnival','#LateJunction','#loudertogetherlaunch','#LoveSpecies',
'#MakeSomeoneSmelly','#MarioKart101','#MarstonGreen','#midlandshour','#MSFTBurnsSupper','#Muskedragons','#NapaSplash2017','#NiNoKuni2','#nogutsnoglory',
'#ODEONScreenUnseen','#ODIFridays','#oneshow','#PoemsAboutTrumpAndMay','#producttank','#r4mediashow','#RLhfctor','#RoadsPolice2017','#RSAeducation','#RSALies',
'#schoolfunding','#seeyouinthefields','#SGTNN2','#ShakespeareSunday','#silvertowntunnel','#socinnpolicy','#Sportscene','#StDwynwensDay','#stereounderground','#SuperSunday',
'#sussexgrad','#TheBigQuestions','#TheresaAndDonald','#ThisMorning','#TrapNominatedLIVE','#TuringLecture','#TW3Awards','#UKCA17','#USdebate','#WesternSecurity','#windinthewillows','#WoolfWorks',
'#WordsByCamila','#worldwideawards','Adama Traore','Airdrie Savings Bank','Alex Bray','Andy Welsh','Annie Power','Article 50','Bart McQueen','Cammy Smith','Chris Clements','Danny Murphy','Don Bersy','Fashion Business','Future Islands','Gail Porter','Handsworth','Highs of 5C','Howard Webb','James Bree','Jamie Hanson','Joel Matip','Josh Windass','Kasim','Keith Curle','Lazar Markovic','Leon Barrett-Hazle','Liz Carr','Lord Bracadale','Luke Amos','Marcus Haber','Marston Green','Mason Mount','Members of Muirfield','Michael Bowditch','Munster v Toulouse','Napoli','Niall Keown','Pascal Lamy','Paul Hunter','Premier Bond','Ravel Morrison','Reigate','Sam Billings','Samoa','Scottish Event Campus','Serena Williams','Sir Patrick Coghlin','Str8 Grove','Stuart Findlay','Tommy Fleetwood','Tsonga','Wasps','West Indies in March','Young Progress Makers','HALF TIME']
exclude=['DON\'T GO TO SLEEP','Emmanuel Adebayor']
analyzer = SentimentIntensityAnalyzer()
tweets = pd.read_csv('data.csv',sep='\t')
print 'name\tcategory\tid\ttweet_id\tuser_id\ttweet_text\thashtags_count\turl_count\tis_retweeted\tlang\tretweet_count\tfavorite_count\tdate\ttime\ttopic\tneg\tneu\tpos\tcompound\tquest\texla\tlength'
for tweet in tweets.values:
to_lang="en"
if tweet[14] not in exclude:
if 'hour' not in tweet[14].lower():
vs = analyzer.polarity_scores(tweet[5])
exla=0
if "!" in tweet[5]:
exla=1
# Question
quest=0
if "?" in tweet[5]:
quest=1
# Length
length=len(tweet[5])
print '\t'.join(str(x) for x in list(tweet))+"\t"+str(vs['neg'])+"\t"+str(vs['neu'])+"\t"+str(vs['pos'])+"\t"+str(vs['compound'])+"\t"+str(quest)+"\t"+str(exla)+"\t"+str(length)
| gpl-3.0 |
tejasckulkarni/hydrology | ch_634/ch_634_stage_area.py | 2 | 4549 | __author__ = 'kiruba'
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import rc
from scipy.interpolate import griddata
from matplotlib import cm
from matplotlib.path import *
from mpl_toolkits.mplot3d import axes3d, Axes3D
import matplotlib as mpl
import matplotlib.colors as mc
import checkdam.checkdam as cd
# latex parameters
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=18)
base_file = '/media/kiruba/New Volume/milli_watershed/stream_profile/634/base_profile_634.csv'
df_base = pd.read_csv(base_file, header=-1, skiprows=1)
# correction
df_base.ix[1,5] = -0.02
df_base.ix[2,5] = -0.03
# print df_base.head()
df_base.ix[1:,1:] = df_base.ix[1:, 1:].add(0.03)
# print df_base.head()
df_base_trans = df_base.T
df_base_trans.columns = df_base_trans.ix[0, 0:]
# print df_base_trans
df_base_trans = df_base_trans.ix[1:, 1500:]
# created_profile = created_profile[sorted(created_profile.columns)]
created_profile = df_base_trans
# print created_profile.head()
sorted_df = created_profile.iloc[0:, 1:]
sorted_df = sorted_df[sorted(sorted_df.columns)]
sorted_df = sorted_df.join(created_profile.iloc[0:, 0], how='right')
created_profile = cd.set_column_sequence(sorted_df, [1500])
# print created_profile.head()
# raise SystemExit(0)
"""
Create (x,y,z) point cloud
"""
z_array = created_profile.iloc[0:, 1:]
columns = z_array.columns
z_array = z_array.values
index = created_profile.iloc[0:,0]
df = pd.DataFrame(z_array, columns=columns).set_index(index)
data_1 = []
for y, row in df.iteritems():
for x, z in row.iteritems():
data_1.append((x, y, z))
data_1_df = pd.DataFrame(data_1, columns=['x', 'y', 'z'])
# print data_1_df.dtypes
# raise SystemExit(0)
X = data_1_df.x
Y = data_1_df.y
Z = data_1_df.z
## contour and 3d surface plotting
fig = plt.figure(figsize=plt.figaspect(0.5))
ax = fig.gca(projection='3d')
# ax = fig.add_subplot(1, 2, 1, projection='3d')
xi = np.linspace(X.min(), X.max(), 100)
yi = np.linspace(Y.min(), Y.max(), 100)
# print len(xi)
# print len(yi)
# print len(Z)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='linear') # create a uniform spaced grid
xig, yig = np.meshgrid(xi, yi)
surf = ax.plot_wireframe(X=xig, Y=yig, Z=zi, rstride=5, cstride=3, linewidth=1)#, cmap=cm.coolwarm, antialiased=False) # 3d plot
# inter_1 = []
# inter_1.append((xi, yi, zi))
# inter = pd.DataFrame(inter_1, columns=['x', 'y', 'z'])
# inter.to_csv('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/inter.csv') # interpolation data output
# fig.colorbar(surf, shrink=0.5, aspect=5)
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel(r'\textbf{X} (m)')
# plt.ylabel(r'\textbf{Y} (m)')
# plt.title(r"Profile for 591", fontsize=16)
plt.gca().invert_xaxis() # reverses x axis
# # ax = fig
# plt.savefig('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/linear_interpolation')
plt.show()
# raise SystemExit(0)
# ## trace contours
# Refer: Nikolai Shokhirev http://www.numericalexpert.com/blog/area_calculation/
check_dam_height = 0.64 #metre
levels = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.1,0.2, 0.3,0.4, 0.5, 0.6, 0.64, 0.7, 0.8] #, 3.93]
cmap = cm.hot
norm = mc.BoundaryNorm(levels, cmap.N )
plt.figure(figsize=(11.69, 8.27))
CS = plt.contourf(xi, yi, zi, len(levels), alpha=.75, norm=norm, levels=levels)
C = plt.contour(xi, yi, zi, len(levels), colors='black', linewidth=.5, levels=levels)
plt.clabel(C, inline=1, fontsize=10)
plt.colorbar(CS, shrink=0.5, aspect=5)
plt.yticks(np.arange(0,30, 5))
plt.xticks(np.arange(-6,6, 2))
plt.grid()
plt.gca().invert_xaxis()
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_634/cont_2d')
plt.show()
contour_a = cd.contour_area(CS)
cont_area_df = pd.DataFrame(contour_a, columns=['Z', 'Area'])
plt.plot(cont_area_df['Z'], cont_area_df['Area'])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.ylabel(r'\textbf{Area} ($m^2$)')
plt.xlabel(r'\textbf{Stage} (m)')
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_634/cont_area_634')
plt.show()
cont_area_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_634/cont_area.csv')
created_profile.iloc[0] = created_profile.columns
# print created_profile
created_profile.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_634/created_profile_634.csv')
| gpl-3.0 |
joshloyal/scikit-learn | sklearn/ensemble/__init__.py | 153 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
rhetoricked/WrittenCommunication2016 | Module3Biber.py | 1 | 30828 | '''
GenderGenreMod3
Copyright 2016 Brian N. Larson and licensors
GENDER/GENRE PROJECT CODE: Module 3
This code is the third segment used to generate and analyze the data for the article
Gender/Genre: The Lack of Gendered Register in Texts Requiring Genre Knowledge.
_Written Communication_, 33(4), 360–384. https://doi.org/10.1177/0741088316667927
(the "Article"). If you use this code, you should cite to the Article.
This module tallies the instances of the features identified by Biber (1988,
1995) as constituting the involved-informational dimension in linguistic
register. It performs the analysis on all three versions of the corpus, Fulltext,
Facttext, and Nonfacttext. (The Article reported only on Fulltext.)
WORKS CITED
Biber, D. (1988). Variation across speech and writing. Cambridge U.K.:
Cambridge University Press.
Biber, D. (1995). Dimensions of register variation : a cross-linguistic
comparison. Cambridge ;;New York: Cambridge University Press.
'''
#!/usr/bin/env python
from __future__ import division
#Set up run-time variables for files and directories.
import os
import shutil
import sys
import pickle
import logging
import datetime
now = datetime.datetime.now().strftime("%y-%m-%d %H.%M.%S")
from io import StringIO #"For strings StringIO can be used like a file opened in
#text mode." See https://docs.python.org/3/library/io.html
import nltk
import numpy, re, pprint, matplotlib, pylab #re is for regular expressions
import scipy.stats as stats
import csv
from lxml import etree
# This parser change for lxml results from this recommendation:
# http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
parser = etree.XMLParser(remove_blank_text=True) #it affects subsequent etree.parse calls that use it as a second argument
#Sentence tokenizer (sentence splitter using sent_tokenize default, which is?)
from nltk.tokenize import sent_tokenize
#Word tokenizer Version using TreebankWorkTokenizer
from nltk.tokenize import TreebankWordTokenizer
tokenizer = TreebankWordTokenizer()
######
#DIRECTORY AND FILE VARIABLES
######
#These set some of the working directories and files. They are set manually,
#and users should be sure that the directories exist and are empty before running
#the code.
#Following line is name selected for directory with corpora out from Module 2.
existingNLTK_dir = "NLTKCorporaUncatUntag/"
#Use next five lines only if you want to solicit end-user input regarding
#directorly
print ("\n \n \nYou must enter a directory path for a directory that contains ")
print ("a folder called " + existingNLTK_dir + " with corpora files from Module 2.")
print ("And THAT folder has to have a Pickles/ directory in it.")
print ("Be sure to include the / at the end of the path! \n \n")
home_dir = input("Enter the path for the data working directory root:")
run_root = home_dir
xml_dir = run_root + "XMLOutfromPython/"
pickle_dir = run_root + "NLTKCorporaUncatUntag/Pickles/"
#####
#DEBUGGING CODE
#####
#The following lines relate to options for debugging the code.
#The following line sets up debugging, which allows the recording of notices
#regarding the progress of the module to be printed to the screen or recorded
#in a log file.
logging_file = home_dir + 'Module3Biber' + " " + str(now + ".log")
logging.basicConfig(filename=logging_file, filemode='w', level=logging.DEBUG)
#To log to a file, add these parameters to previous basicConfig:
# filename=logging_file, filemode='w',
#To log to the console, delete the parameters in the previous line.
#This code records some basic run information
logging.debug(" Gender/genre Module 3Biber: Run " + str(now))
logging.debug(" Run on data in " + home_dir)
logging.debug(" Source of corpora files out from Module 2: " + existingNLTK_dir)
logging.debug(" Output from this module in: " + pickle_dir)
#For testing, it may be desirable to pull just a few papers. The next variable
#works with the optional condition noted in the file loop below to select only
#those files from the corpus directory whose names begin with these characters.
#Note that these are strings and should be only
#four characters long.
sought_papers = ["1007", "1055", "1116", "2013", "2021"]
#####
#WORD LISTS
#####
#The following lines load various wordlists used in this module.
with open((run_root + "PrivateVerbs.txt"), encoding = "utf-8") as f:
privateVerbs = f.read().splitlines()
with open((run_root + "PubVerbs.txt"), encoding = "utf-8") as f:
publicVerbs = f.read().splitlines()
with open((run_root + "SuaVerbs.txt"), encoding = "utf-8") as f:
suasiveVerbs = f.read().splitlines()
priPubSuaVerbs = privateVerbs + publicVerbs + suasiveVerbs
with open((run_root + "Contractions.txt"), encoding = "utf-8") as f:
contracts = f.read().splitlines()
with open((run_root + "AllP-Whp.txt"), encoding = "utf-8") as f:
allP_whP = f.read().splitlines()
with open((run_root + "Amplifiers.txt"), encoding = "utf-8") as f:
amplifiers = f.read().splitlines()
with open((run_root + "CL-P.txt"), encoding = "utf-8") as f:
clpunct = f.read().splitlines()
allpunct = list(clpunct)  # copy, so appending ',' below does not also modify clpunct
allpunct.append(',')
with open((run_root + "Not.txt"), encoding = "utf-8") as f:
analyticNeg = f.read().splitlines()
with open((run_root + "Dem.txt"), encoding = "utf-8") as f:
demonstrative = f.read().splitlines()
with open((run_root + "FirstPers.txt"), encoding = "utf-8") as f:
firstPersPro = f.read().splitlines()
with open((run_root + "IndefPro.txt"), encoding = "utf-8") as f:
indefPro = f.read().splitlines()
with open((run_root + "SecPers.txt"), encoding = "utf-8") as f:
secPersPro = f.read().splitlines()
with open((run_root + "Subpro.txt"), encoding = "utf-8") as f:
subjectPro = f.read().splitlines()
with open((run_root + "WhoWords.txt"), encoding = "utf-8") as f:
whoWords = f.read().splitlines()
with open((run_root + "WHPwords.txt"), encoding = "utf-8") as f:
wHPWords = f.read().splitlines()
def process(section):
nltkcorpus_dir = run_root + existingNLTK_dir + section + "/"
logging_file = nltkcorpus_dir + "3BiberLog " + section + ".log" #5-1 moved out to global or deleted.
results_file = pickle_dir + section + "Features.pickle"
csv_out_file = run_root + "BiberOutput " + section + " " + str(now) + ".csv"
#papers will be a list of dictionaries. Each dictionary will have the features for one paper.
papers = [ ]
def getgender(paper_num): #Move this function outside the process function.
logging.debug(paper_num + " GET GENDER")
logging.debug("\n_____")
#Go into the XML document to get the gender.
for f in os.listdir(xml_dir):
if f.startswith(paper_num):
xml_doc_full_path = xml_dir + f
gate_doc = etree.parse(xml_doc_full_path, parser) #Parse file with defined parser creating ElementTree
doc_root = gate_doc.getroot() #Get root Element of parsed file
logging.debug("\n\n") # For debugging.
logging.debug ("XML Paper " + paper_num + " loaded.") # This is just for debugging.
#These lines grab the Analysis_Gender value from the text and assign it to var gender. We'll use it to categorize the NLTK corpus
#and to withhold non-gender-categorized texts from the corpus. This prolly should be a function, but that will happen later.
gender = ""
gg = doc_root.find("GG")
quest = gg.find("Questionnaire")
for i in quest.iter("Feature"):
if i.get("Name") == "Analysis_Gender":
gender = i.get("Value")
logging.debug("Gender: " + str(gender)) #for debugging
return gender
#Begin loop over papers
#For each paper, we want to do these things:
#Create a dictionary containing the following keys/values.
# 1. Gender of text author.
# See Biber 1995, p. 142, Table 6.1 for explanation of items.
# See Reymann 2002 for discussion of implementation for automation.
# Involved end:
# 2. Count of private verbs: Tagged as verb and in list.
# 3. Count of THAT-deletion:
# a. PUB/PRV/SUA + demonstrative pronoun/SUBJPRO5
# b. PUB/PRV/SUA + PRO/N + AUX/V
# c. PUB/PRV/SUA + ADJ/ADV/DET/POSSPRO + (ADJ) + N + AUX/V
# d. See lists of public, private, suasive verbs; dems; subpros
# 4. Count of Contractions. See list.
# 5. Count of Present-tense verbs. Use tagger tag VBP or VBZ
# 6. Count of Second-person pronouns. See SecPers list.
# 7. Count of DO as pro-verb. Any form of do (do does did done) NOT
# in the following:
# a. DO + (ADV) + V (DO as auxiliary)
# b. ALL-P/WHP + DO (where ALL-P/WHP is in file AllP-Whp.txt)
# 8. Count of Analytic negation (with 'not' or 'n't')
# 9. Count of demonstrative pronouns.
# that/this/these/those + V/AUX/WHP/and
# 10. Count of General emphatics
# a. for sure
# b. a lot
# c. such a
# d. real + ADJ
# e. so + ADJ
# f. DO + V
# g. just
# h. really
# i. most
# j. more
# 11. Count First-person pronouns. See FirstPers.txt.
# 12. Count of Pronoun IT. Count all instances of it.
# 13. Count of BE as main verb.
# Form of BE: be, is are, was, were, been--followed by any of these
# DT / PRP$ / IN / JJ / JJR / JJS
# 14. Count of Causative subordination. Just count BECAUSE
# 15. Count of Discourse particles. CL-P followed by any of:
# well / now / anyway / anyhow / anyways
# 16. Count of Indefinite pronouns.
# a. any of IndefPro.txt OR
# b. no one
# 17. Count of General hedges. Did not use all of Reymann, only
# at about / something like / more or less / almost / maybe
# 18. Count of Amplifiers. If word is in Amplifiers.
# 19. Count of Sentence relatives. Comma followed by which.
# 20. Count of WH questions. Sentence boundary or CL-P + WhoWord.txt + MD
# 21. Count of Possibility modals. All of can, could, may, and might
# 22. Count of Non-phrasal coordination. Skipped here.
# 23. Count of WH clauses. PUB/PRV/SUA + WHP/WHO + not(MD)
# 24. Count of Final prepositions. IN followed by CL-P or comma.
# 25. Count of (Adverbs). RB, RBR, RBS
# Informational end:
# 26. Count of Nouns.
# 27. Mean Word Length. Mean length of all non-punct.
# 28. Count of prepositions. IN, less 24. Did not exclude those that
# function as time and place adverbials, conjuncts or subordinators.
# 29. Type-token ratio. Frequency count of types within first 400 words.
# 30. Count of Attributive adjectives. JJ/JJR/JJS + (JJ/JJR/JJS or Noun)
# 31. Count of (Place adverbials) Skipped here
# 32. Count of (Agentless passives) Skipped here
# 33. Count of (Past participial postnominal clauses) Skipped here.
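    # Illustrative sketch (the example sentence is ours, not from the corpus): for
    # "I think it is true.", the tokenizer plus nltk.pos_tag yield pairs such as
    # ('I', 'PRP'), ('think', 'VBP'), ('it', 'PRP'), ('is', 'VBZ'), ('true', 'JJ')
    # (exact tags depend on the tagger version). 'think' is in PrivateVerbs.txt, so
    # feature 2 increments; 'I' is in FirstPers.txt, so feature 11 increments; 'it'
    # increments feature 12; 'think' (VBP) and 'is' (VBZ) each increment feature 5;
    # and 'is' followed by the adjective 'true' also increments feature 13.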
for file_name in os.listdir(nltkcorpus_dir):
paper_num = file_name[0:4] #Note: this makes paper_num a str
if ((not file_name.startswith('.')) ) : #Screens out Mac OS hidden files,
            #names of which start with '.'
            #To process only a subset of papers, add the condition
            #"and paper_num in sought_papers" to the preceding if-statement;
            #only the files selected in the sought_papers list above are then used.
filepath = nltkcorpus_dir + file_name
logging.debug("\n*************************************************************************")
logging.debug ("LOADING " + filepath)
logging.debug ("\n*************************************************************************")
print("\n*************************************************************************")
print ("LOADING " + filepath)
print ("\n*************************************************************************")
#opens the subject file , reads its contents, and closes it.
f = open(filepath, encoding="utf-8")
infile = f.read()
f.close()
#Declare the feature dictionary for this paper
paper_dict = { }
paper_dict["A_papernum"] = paper_num #NO-NORMALIZE
#Populate the dictionary with key/0 for each feature in the list of features.
paper_dict["A_tokens"] = 0 # Total tokens in paper NO-NORMALIZE
paper_dict["A_sents"] = 0 #Total sentences in paper NO-NORMALIZE
paper_dict["A_words"] = 0 #Total number of non-punct tokens. NO-NORMALIZE
paper_dict["01Gender"] = getgender(paper_num) #NO-NORMALIZE
paper_dict["02PrivateVerbs"] = 0 #Private verbs.
paper_dict["03ThatDeletion"] = 0 #
paper_dict["04Contractions"] = 0
paper_dict["05PresVerbs"] = 0 #Present-tense verbs.
paper_dict["06SecPersPrn"] = 0 #Second-person pronouns
paper_dict["07DOproverb"] = 0 #Definition below.
paper_dict["08AnalyticNeg"] = 0 #Negation with not or n't
paper_dict["9DemoPron"] = 0 #Demonstrative pronouns
paper_dict["10GenEmphatics"] = 0 #
paper_dict["11FirstPersPrn"] = 0 #First-pe rson pronouns
paper_dict["12It"] = 0 #count of IT
paper_dict["13BeMain"] = 0 #BE as a main verb
paper_dict["14CauseSub"] = 0 # Count because
paper_dict["15DiscPart"] = 0 #Count as specified.
paper_dict["16IndefPro"] = 0 # Indefinite pronouns
paper_dict["17GenHedges"] = 0 #Count of general hedges
paper_dict["18Amplifiers"] = 0
paper_dict["19SentRelatives"] = 0 #Sentence followed by which.
paper_dict["20WhQuestion"] = 0 # Wh questions.
paper_dict["21PossModals"] = 0 # Possibility modals.
paper_dict["22NonPhrasalCoord"] = 0 #Skipping.
paper_dict["23WhClauses"] = 0 #WH clauses.
paper_dict["24FinalPreps"] = 0 #Final prepositions.
paper_dict["25Adverbs"] = 0
paper_dict["26Nouns"] = 0
paper_dict["27WordLength"] = 0 #mean length of non-punct words NO-NORMALIZE
paper_dict["28Preps"] = 0 #Prepositions (other than 24)
paper_dict["29TTRatio"] = 0 # Type-token ratio NO-NORMALIZE
paper_dict["30AttribAdj"] = 0 #Attributive adjectives
paper_dict["31PlaceAdverbs"] = 0 #Place adverbials. Skipped here.
paper_dict["32AgentlessPass"] = 0 #Agentless passives. Skipped here.
paper_dict["33PPPC"] = 0 #Past participial postnominal clauses. Skipped here.
logging.debug("\n_____")
logging.debug("Paper " + paper_num + "'s number of features: " + str(len(paper_dict)))
logging.debug("\n_____")
first400TTypes = []
aggWordLength = 0
#Tokenize infile into sentences. The result is a list of sentences.
sentences = sent_tokenize(infile)
paper_dict["A_sents"] = len(sentences)
#Begin loop over sentences in paper.
logging.debug("\n_____")
logging.debug(paper_num + " LOOP OVER SENTENCES")
logging.debug("\n_____")
sentence_counter = 0
for i in sentences: #For each sentence in the paper...
logging.debug("\nPaper-Sentence number: " + paper_num + "-" + str(sentence_counter))
logging.debug(i) # This is just for debug.
#Word-tokenize it.
tokenized = tokenizer.tokenize(i) #Result is a list of word-tokens.
logging.debug("\nTokenized sentence: " + str(sentence_counter))
logging.debug(tokenized) #for debug only
#POS Tag it
postagged = nltk.pos_tag(tokenized) #Result is a list of
#tuples, with word-token and pos-token.
logging.debug("\nPOS tagged sentence: " + str(sentence_counter))
logging.debug(postagged) #for debug only
l = len(postagged)
logging.debug("Sentence length: " + str(l))
#for i in postagged:
for index, token in enumerate(postagged):
paper_dict["A_tokens"] += 1 #Increments paper token counter.
#THESE LINES SET THE CURRENT AND CONTEXT TOKEN VALUES.
this_token = postagged[index]
this_type = this_token[0].lower()
this_tag = this_token[1]
#print("This: " + str(this_type) + " " + (str(this_tag)))
if index > 0:
prevToken = postagged[index - 1]
else:
prevToken = ["NULL","NULL"]
prev_type = prevToken[0]
prev_tag = prevToken[1]
#print("Prev: " + str(prev_type) + " " + (str(prev_tag)))
if index < (l - 1):
token1 = postagged[index + 1]
else:
token1 = ["NULL","NULL"]
t1_type = token1[0]
t1_tag = token1[1]
#print("T1: " + str(t1_type) + " " + (str(t1_tag)))
if index < (l - 2):
token2 = postagged[index + 2]
else:
token2 = ["NULL","NULL"]
t2_type = token2[0]
t2_tag = token2[1]
#print("T2: " + str(t2_type) + " " + (str(t2_tag)))
if index < (l - 3):
token3 = postagged[index + 3]
else:
token3 = ["NULL","NULL"]
t3_type = token3[0]
t3_tag = token3[1]
#print("T3: " + str(t3_type) + " " + (str(t3_tag)))
#print("T4 if")
if index < (l - 4):
token4 = postagged[index + 4]
else:
#print("T4 else")
token4 = ["NULL","NULL"]
#print("T4 assignments")
t4_type = token4[0]
t4_tag = token4[1]
#print("T4: " + str(t4_type) + " " + (str(t4_tag)))
#DONE SETTING TOKEN VALUES.
if not (this_type in allpunct):
paper_dict["A_words"] += 1
aggWordLength += len(this_type)
if not (paper_dict["A_words"]>400):
if this_type not in first400TTypes:
first400TTypes.append(this_type)
if this_type in privateVerbs:
paper_dict["02PrivateVerbs"] += 1
if ((#Condition 1
(this_type in priPubSuaVerbs) and
((t1_type in demonstrative) or (t1_type in subjectPro))) or
(#Condition 2
(this_type in priPubSuaVerbs) and
(t1_tag in ["PRP", "NN", "NNS", "NNP", "NNPS"]) and
(t2_tag in ["MD", "VB", "VBD", "VBG", "VBN", "VBP"]) ) or
(#Condition 3a
(this_type in priPubSuaVerbs) and
(t1_tag in ["JJ", "JJR", "JJS", "RB", "RBR", "RBS",
"DT", "PRP$"]) and
(t2_tag in ["NN", "NNS", "NNP", "NNPS"]) and
(t3_tag in ["MD", "VB", "VBD", "VBG", "VBN", "VBP"]) ) or
(#Condition 3b
(this_type in priPubSuaVerbs) and
(t1_tag in ["JJ", "JJR", "JJS", "RB", "RBR", "RBS",
"DT", "PRP$"]) and
(t2_tag in ["JJ", "JJR", "JJS"]) and
(t3_tag in ["NN", "NNS", "NNP", "NNPS"]) and
(t4_tag in ["MD", "VB", "VBD", "VBG", "VBN", "VBP"]) )
):
logging.debug("THAT deletion: " + this_type + " " + t1_type
+ " " + t2_type + " " + t3_type + " " + t4_type)
paper_dict["03ThatDeletion"] += 1
if this_type in contracts:
paper_dict["04Contractions"] += 1
if this_tag in ['VBP', 'VBZ']:
paper_dict["05PresVerbs"] += 1
if this_type in secPersPro:
paper_dict["06SecPersPrn"] += 1
if ( this_type in ["do", "does", "did", "doing", "done"] and not
((#Condition 1a
(t1_type in ["VB", "VBD", "VBG", "VBN", "VBP"]) ) or
(#Condition 1b
(t1_type in ["RB", "RBR", "RBS"]) and
(t2_type in ["VB", "VBD", "VBG", "VBN", "VBP"]) ) or
(#Condition 2
(prev_type in allP_whP) or
(prev_type == "NULL") )
)):
paper_dict["07DOproverb"] += 1
if this_type in analyticNeg:
paper_dict["08AnalyticNeg"] += 1
if ((this_type in demonstrative) and
( (t1_tag in ["MD", "VB", "VBD", "VBG", "VBN", "VBP"]) or
(t1_type == "and") or
(t1_type in wHPWords))):
paper_dict["9DemoPron"] += 1
logging.debug("Demonstrative pron: " + prev_type + " " +
this_type + " " + t1_type + " " + t2_type)
if ( (#Condition 1
(this_type in ["just", "really", "most", "more"]) ) or
(#Condition 2
(this_type == "for" and t1_type == "sure") or
(this_type == "a" and t1_type == "lot") or
(this_type == "such" and t1_type == "a") or
(this_type in ["real", "so"] and t1_tag == "JJ") or
(this_type in ["do", "does", "did", "doing", "done"]
                        and t1_tag in ["VB", "VBD", "VBG", "VBN", "VBP"])
)):
paper_dict["10GenEmphatics"] += 1
logging.debug("General emphatic: " + this_type + " " + t1_type)
if this_type in firstPersPro:
paper_dict["11FirstPersPrn"] += 1
if this_type == "it":
paper_dict["12It"] += 1
if (this_type in ["be", "is", "are", "was", "were",
"been", "being"] and
t1_tag in ["DT", "PRP$", "IN", "JJ", "JJR", "JJS"]):
paper_dict["13BeMain"] += 1
logging.debug("BE main verb: " + this_type + " " + t1_type)
if this_type == "because":
paper_dict["14CauseSub"] += 1
if (this_type in ["well", "now", "anyway", "anyhow", "anyways"]
and (prev_type in clpunct or prev_type == "NULL")):
paper_dict["15DiscPart"] += 1
logging.debug("Discourse particle: " + prev_type + " " + this_type)
if (this_type in indefPro or
this_type == "no" and t1_type == "one"):
paper_dict["16IndefPro"] += 1
logging.debug("Indef pronoun: " + this_type + " " + t1_type)
if (this_type in ['almost', 'maybe'] or
((this_type == "at" and t1_type == "about") or
(this_type == "something" and t1_type == "like") or
(this_type == "more" and t1_type == "or" and t2_type == "less")
)
):
paper_dict["17GenHedges"] += 1
logging.debug("General hedge: " + this_type + " " + t1_type +
" " + t2_type)
if this_type in amplifiers:
paper_dict["18Amplifiers"] += 1
if (prev_type == "," and this_type == "which"):
paper_dict["19SentRelatives"] += 1
logging.debug("Sentence relative: " + prev_type + " " + this_type +
" " + t1_type)
if (this_type in whoWords and
(prev_type == "NULL" or prev_type in clpunct) and
(t1_tag == "MD")):
paper_dict["20WhQuestion"] += 1
logging.debug("WH question: " + prev_type + " " + this_type +
" " + t1_type)
if this_type in ['can', 'could', 'may', 'might']:
paper_dict["21PossModals"] += 1
if ( ((this_type in wHPWords) or (this_type in whoWords)) and
(prev_type in priPubSuaVerbs) and
not (t1_tag == "MD")
):
paper_dict["23WhClauses"] += 1
logging.debug("WH clause: " + prev_type + " " + this_type +
" " + t1_type)
if (this_tag == "IN" and (t1_type in clpunct or t1_type == ",")
):
paper_dict["24FinalPreps"] += 1
logging.debug("Final preposition: " + this_type + " " + t1_type)
if this_tag in ['RB', 'RBR', 'RBS']:
paper_dict["25Adverbs"] += 1
if this_tag in ['NN', 'NNP', 'NNS', 'NNPS']:
paper_dict["26Nouns"] += 1
if this_tag == 'IN':
paper_dict["28Preps"] += 1
if (this_tag in ["JJ", "JJR", "JJS"]
and t1_tag in ["JJ", "JJR", "JJS", "NN", "NNS",
"NNP", "NNPS"]):
paper_dict["30AttribAdj"] += 1
sentence_counter += 1
paper_dict["29TTRatio"] = len(first400TTypes)/4
paper_dict["27WordLength"] = aggWordLength/paper_dict["A_words"]
paper_dict["28Preps"] = paper_dict["28Preps"] - paper_dict["24FinalPreps"]
#Normalize values
#First ID keys that don't get normalized
noNormalize = ["A_papernum", "A_tokens", "A_sents", "A_words", "01Gender",
"27WordLength", "29TTRatio"]
normalTokens = paper_dict["A_tokens"]
#print("Number of tokens: " + str(normalTokens))
for key, val in paper_dict.items():
#print(key + " Value: " + str(val))
if key not in noNormalize:
#print(key + " needs normalizing")
paper_dict[key] = val/normalTokens*1000
#print(paper_dict[key])
# print(i)
#Now we need to normalize the data.
#1. Multiply value of each of the following types of feature by 1/#tokens: functionwords, postags.
#2. Multiply value of each of trigrams 4/#tokens
#3. Multiply value of each of bigrams 2/#tokens
# tokensfactor = 1 / paper_dict["A_tokens"]
# bigramsfactor = tokensfactor * 2
# trigramsfactor = tokensfactor * 4
# print("Factors: Tokens = " + str(tokensfactor) + " Bigrams = " + str(bigramsfactor) + " Trigrams = " + str(trigramsfactor))
# for i in paper_dict.keys():
# if i.startswith("F_") or i.startswith("POS_"):
# paper_dict[i] = paper_dict[i] * tokensfactor
# if i.startswith("Bi_"):
# paper_dict[i] = paper_dict[i] * bigramsfactor
# if i.startswith("Tri_"):
# paper_dict[i] = paper_dict[i] * trigramsfactor
#
#Use following three lines for debug only
#print "Feature set:"
#for key in sorted(paper_dict.iterkeys()):
# print "%s: %s" % (key, paper_dict[key])
papers.append(paper_dict)
with open(results_file, "wb") as resPickle:
pickle.dump(papers, resPickle)
with open(csv_out_file, "w") as csv_out:
        headers = list(papers[0].keys())
csvwriter = csv.DictWriter(csv_out, delimiter=',', fieldnames=headers)
csvwriter.writerow(dict((fn,fn) for fn in headers))
for row in papers:
csvwriter.writerow(row)
process("Nonfacttext")
process("Facttext")
process("Fulltext")
| gpl-3.0 |
ikassi/menpo | setup.py | 1 | 2496 | import sys
from setuptools import setup, find_packages
from Cython.Build import cythonize
import numpy as np
from buildhelpers.shaders import build_c_shaders
# ---- C/C++ EXTENSIONS ---- #
cython_modules = ["menpo/geodesics/kirsanov.pyx",
"menpo/shape/mesh/cpptrimesh.pyx",
"menpo/shape/mesh/normals.pyx",
"menpo/interpolation/cinterp.pyx",
"menpo/transform/fastpwa.pyx",
"menpo/features/cppimagewindowiterator.pyx"]
cython_exts = cythonize(cython_modules, nthreads=2, quiet=True)
# ---- OPENGL C EXTENSIONS ---- #
# first, convert the plain text shaders into C string literals
build_c_shaders()
opengl_c_cython_modules = ["menpo/rasterize/copengl.pyx"]
opengl_c_exts = cythonize(opengl_c_cython_modules, nthreads=2, quiet=True)
# unfortunately, OpenGL is just different on OS X/Linux
if sys.platform.startswith('linux'):
for c_ext in opengl_c_exts:
c_ext.libraries += ['GL', 'GLU', 'glfw']
elif sys.platform == 'darwin':
for c_ext in opengl_c_exts:
c_ext.libraries += ['glfw3']
# TODO why does it compile without these on OS X?!
#c_ext.extra_compile_args += ['-framework OpenGL',
# '-framework Cocoa', '-framework IOKit',
# '-framework CoreVideo']
setup(name='menpo',
version='0.2',
description='iBUG Facial Modelling Toolkit',
author='James Booth',
author_email='[email protected]',
include_dirs=[np.get_include()],
ext_modules=cython_exts + opengl_c_exts,
packages=find_packages(),
install_requires=[# Core
'numpy>=1.8.0',
'scipy>=0.12.0',
'Cython>=0.20.1', # req on OS X Mavericks
# Image
'Pillow>=2.0.0',
'scikit-image>=0.8.2',
# 3D import
'menpo-pyvrml97==2.3.0a4',
'cyassimp>=0.1.2',
# Visualization
'matplotlib>=1.2.1',
# Need to decide if this is really needed
'decorator>=3.4.0',
# Docs and testing
'Sphinx>=1.2b1',
'numpydoc>=0.4',
'nose>=1.3.0'],
extras_require={'3d': 'mayavi>=4.3.0'}
)
| bsd-3-clause |
J4sp3r/damrobot | Project/lib/util.py | 1 | 1474 | #!/usr/local/bin/python
import cv2,os
import numpy as np
import matplotlib.pyplot as plt
from lib import log
def imshow(img):
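	# Show the image in an OpenCV window named "preview" and block until ESC (key 27) is pressed.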
cv2.namedWindow("preview")
cv2.imshow("preview",img)
rval = True
while rval:
key = cv2.waitKey(27)
if key == 27: # exit on ESC
break
cv2.destroyWindow("preview")
def imshow2(img):
plt.subplot(111),plt.imshow(img),plt.title('Output')
plt.show()
def imgresize(img,w,h):
return cv2.resize(img,(w, h), interpolation = cv2.INTER_CUBIC)
def getpos(img,ratio):
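	# Crop the centred sub-region whose sides are `ratio` times the image size,
	# by mapping its four corners onto a new image with a perspective transform.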
width = img.shape[0]
width2 = int(width*ratio)
height = img.shape[1]
height2 = int(height*ratio)
pts1 = np.float32([[(width-width2)*0.5-1,(height-height2)*0.5-1],[(width-width2)*0.5+width2-1,(height-height2)*0.5-1],[(width-width2)*0.5-1,(height-height2)*0.5+height2-1],[(width-width2)*0.5+width2-1,(height-height2)*0.5+height2-1]])
pts2 = np.float32([[0,0],[width2-1,0],[0,height2-1],[width2-1,height2-1]])
retval = cv2.getPerspectiveTransform(pts1,pts2)
warp = cv2.warpPerspective(img,retval,(width2,height2))
return warp
def board2file(board,file):
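	# Write the 8x8 board to `file` as eight lines of comma-separated values.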
f = open(file,'w')
for x in range(8):
rule = ""
first = True
for y in range(8):
if first:
first = False
else:
rule += ","
rule += str(board[x][y])
if x < 7:
rule += "\n"
f.write(rule)
f.close()
def state(path,state):
f = open(path + "\\files\\state.txt",'w')
f.write(state)
f.close()
def newboard(path):
f = open(path + "\\files\\newbord.txt",'w')
f.write("true")
	f.close()
| mit |
soulmachine/scikit-learn | sklearn/pipeline.py | 8 | 16439 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
# One round of beers on me if someone finds out why the backslash
# is needed in the Attributes section so as not to upset sphinx.
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
"""
# BaseEstimator interface
def __init__(self, steps):
self.named_steps = dict(steps)
names, estimators = zip(*steps)
if len(self.named_steps) != len(steps):
raise ValueError("Names provided are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(zip(names, estimators))
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps a the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps.copy()
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator."""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
def predict_log_proba(self, X):
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform."""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
def inverse_transform(self, X):
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
Karel-van-de-Plassche/bokeh | bokeh/sampledata/periodic_table.py | 5 | 3181 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a periodic table data set. It exposes an attribute ``elements``
which is a pandas Dataframe with the following fields:
.. code-block:: python
    elements['atomic Number']
elements['symbol']
elements['name']
elements['atomic mass'] (units: amu)
elements['CPK'] (convention for molecular modeling color)
elements['electronic configuration']
elements['electronegativity'] (units: Pauling)
elements['atomic radius'] (units: pm)
elements['ionic radius'] (units: pm)
elements['van der waals radius'] (units: pm)
elements['ionization enerygy'] (units: kJ/mol)
elements['electron affinity'] (units: kJ/mol)
elements['phase'] (standard state: solid, liquid, gas)
elements['bonding type']
elements['melting point'] (units: K)
elements['boiling point'] (units: K)
elements['density'] (units: g/cm^3)
elements['type'] (see below)
elements['year discovered']
elements['group']
elements['period']
where element types are:
actinoid
alkali metal
alkaline earth metal
    halogen
lanthanoid
metal
metalloid
noble gas
nonmetal
transition metalloid
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'elements',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
elements = package_csv('periodic_table', 'elements.csv')
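
# Minimal usage sketch (illustrative; ``elements`` is the pandas DataFrame
# described in the module docstring above):
#
#   from bokeh.sampledata.periodic_table import elements
#   halogens = elements[elements['type'] == 'halogen']
#   print(halogens[['symbol', 'name', 'atomic mass']])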
| bsd-3-clause |
lukeiwanski/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 15 | 11087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of `Estimator` API (deprecated).
This module is deprecated. Please use
@{tf.contrib.factorization.KMeansClustering} instead of
@{tf.contrib.learn.KMeansClustering}. It has a similar interface, but uses the
@{tf.estimator.Estimator} API instead of @{tf.contrib.learn.Estimator}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.deprecation import deprecated
_USE_TF_CONTRIB_FACTORIZATION = (
'Please use tf.contrib.factorization.KMeansClustering instead of'
' tf.contrib.learn.KMeansClustering. It has a similar interface, but uses'
' the tf.estimator.Estimator API instead of tf.contrib.learn.Estimator.')
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes _LossRelativeChangeHook.
Args:
tolerance: A relative tolerance of change between iterations.
"""
self._tolerance = tolerance
self._prev_loss = None
def begin(self):
self._loss_tensor = ops.get_default_graph().get_tensor_by_name(
KMeansClustering.LOSS_OP_NAME + ':0')
assert self._loss_tensor is not None
def before_run(self, run_context):
del run_context
return SessionRunArgs(
fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
def after_run(self, run_context, run_values):
loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
assert loss is not None
if self._prev_loss is not None:
relative_change = (abs(loss - self._prev_loss) /
(1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to parse features."""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], 1)
return features
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
"""Model function for KMeansClustering estimator."""
assert labels is None, labels
(all_scores, model_predictions, losses,
is_initialized, init_op, training_op) = clustering_ops.KMeans(
_parse_tensor_or_dict(features),
params.get('num_clusters'),
initial_clusters=params.get('training_initial_clusters'),
distance_metric=params.get('distance_metric'),
use_mini_batch=params.get('use_mini_batch'),
mini_batch_steps_per_iteration=params.get(
'mini_batch_steps_per_iteration'),
random_seed=params.get('random_seed'),
kmeans_plus_plus_num_retries=params.get(
'kmeans_plus_plus_num_retries')).training_graph()
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
summary.scalar('loss/raw', loss)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0],
}
eval_metric_ops = {KMeansClustering.SCORES: loss}
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
relative_tolerance = params.get('relative_tolerance')
if relative_tolerance is not None:
training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
return ModelFnOps(
mode=mode,
predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss,
train_op=training_op,
training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
LOSS_OP_NAME = 'kmeans_loss'
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy. See clustering_ops.py
for more details.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
config: See Estimator
"""
params = {}
params['num_clusters'] = num_clusters
params['training_initial_clusters'] = initial_clusters
params['distance_metric'] = distance_metric
params['random_seed'] = random_seed
params['use_mini_batch'] = use_mini_batch
params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
params['relative_tolerance'] = relative_tolerance
super(KMeansClustering, self).__init__(
model_fn=_kmeans_clustering_model_fn,
params=params,
model_dir=model_dir,
config=config)
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def predict_cluster_idx(self, input_fn=None):
"""Yields predicted cluster indices."""
key = KMeansClustering.CLUSTER_IDX
results = super(KMeansClustering, self).predict(
input_fn=input_fn, outputs=[key])
for result in results:
yield result[key]
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def score(self, input_fn=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(
input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def transform(self, input_fn=None, as_iterable=False):
"""Transforms each element to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
input_fn: see predict.
as_iterable: see predict
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
key = KMeansClustering.ALL_SCORES
results = super(KMeansClustering, self).predict(
input_fn=input_fn,
outputs=[key],
as_iterable=as_iterable)
if not as_iterable:
return results[key]
else:
return results
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def clusters(self):
"""Returns cluster centers."""
return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
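# Illustrative usage sketch (hedged): how this estimator is typically driven end to
# end. `train_input_fn`/`predict_input_fn` are placeholder names for functions that
# return the feature tensor, and the exact Estimator calling convention depends on
# the tf.contrib.learn version in use.
#   kmeans = KMeansClustering(num_clusters=3, use_mini_batch=False)
#   kmeans.fit(input_fn=train_input_fn, steps=10)
#   assignments = list(kmeans.predict_cluster_idx(input_fn=predict_input_fn))
#   total_distance = kmeans.score(input_fn=predict_input_fn, steps=1)
#   centers = kmeans.clusters()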
| apache-2.0 |
amnona/heatsequer | heatsequer/experiment/sorting.py | 1 | 14671 | #!/usr/bin/env python
"""
heatsequer experiment sorting module
"""
# amnonscript
__version__ = "0.9"
import heatsequer as hs
import numpy as np
import copy
from sklearn.preprocessing import scale
from scipy import cluster,spatial,stats
def sortbacteria(exp,inplace=False,logit=True):
"""
sort bacteria according to taxonomy (alphabetically)
input:
exp : experiment
the experiment to sort
inplace : bool
True to sort in place (replace current experiment), False to create a new experiment
logit : bool
True to add to command log, False to skip (if called from other heatsequer function)
output:
newexp : experiment
The sorted experiment (by taxonomy name)
"""
params=locals()
hs.Debug(1,'Sorting %d bacteria by taxonomy' % len(exp.seqs))
tax=exp.tax
svals,sidx=hs.isort(tax)
newexp=hs.reorderbacteria(exp,sidx,inplace=inplace)
if logit:
newexp.filters.append('sorted bacteria by taxonomy')
hs.addcommand(newexp,"sortbacteria",params=params,replaceparams={'exp':exp})
return newexp
def clusterbacteria(exp,minreads=0,uselog=True):
"""
cluster bacteria in an experiment according to similar behavior
input:
exp : Experiment
minreads : int
the minimal number of reads to keep before clustering (to make faster)
uselog : bool
True to log transform reads for clustering (before normalizing), false to use full reads
output:
newexp : Experiment
the filtered and clustered experiment
"""
params=locals()
if exp.sparse:
exp=hs.copyexp(exp,todense=True)
newexp=hs.filterminreads(exp,minreads,logit=False)
# normalize each row (bacteria) to sum 1
dat=copy.copy(newexp.data)
if uselog:
dat[dat<=2]=2
dat=np.log2(dat)
dat=scale(dat,axis=1,copy=False)
# cluster
dm=spatial.distance.pdist(dat,metric='euclidean')
ll=cluster.hierarchy.single(dm)
order=cluster.hierarchy.leaves_list(ll)
newexp=hs.reorderbacteria(newexp,order)
hs.addcommand(newexp,"clusterbacteria",params=params,replaceparams={'exp':exp})
newexp.filters.append("cluster bacteria minreads=%d" % minreads)
return newexp
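# Illustrative sketch (hedged) of the reordering performed above, as self-contained
# numpy/scipy code; the toy array and its shape are made up for the example:
# log-transform, z-score each row, then read the leaf order of a single-linkage tree.
#   import numpy as np
#   from scipy import cluster, spatial
#   from sklearn.preprocessing import scale
#   toy = np.random.rand(20, 50) * 100                      # 20 "bacteria" x 50 samples
#   logged = np.log2(np.maximum(toy, 2))
#   zscored = scale(logged, axis=1)
#   dm = spatial.distance.pdist(zscored, metric='euclidean')
#   order = cluster.hierarchy.leaves_list(cluster.hierarchy.single(dm))
#   reordered = toy[order, :]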
def clustersamples(exp,minreads=0):
"""
cluster samples in an experiment according to similar behavior
input:
exp :Experiment
minreads : int
the minimal original number of reads per sample to keep it
output:
newexp : Experiment
the filtered and clustered experiment
"""
params=locals()
if exp.sparse:
exp=hs.copyexp(exp,todense=True)
newexp=hs.filterorigreads(exp,minreads)
# normalize each row (bacteria) to sum 1
dat=copy.copy(newexp.data)
dat=np.transpose(dat)
dat[dat<=2]=2
dat=np.log2(dat)
# cluster
dm=spatial.distance.pdist(dat,metric='braycurtis')
ll=cluster.hierarchy.single(dm)
order=cluster.hierarchy.leaves_list(ll)
newexp=hs.reordersamples(newexp,order)
hs.addcommand(newexp,"clustersamples",params=params,replaceparams={'exp':exp})
newexp.filters.append("cluster samples minreads=%d" % minreads)
return newexp
def sortsamples(exp,field,numeric=False,logit=True):
"""
sort samples according to field
input:
exp : Experiment
field : string
name of the field to sort by
numeric : bool
True for numeric values in field, false for text
output:
newexp : Experiment
the sorted experiment
"""
params=locals()
fvals=hs.getfieldvals(exp,field)
if numeric:
fvals=hs.tofloat(fvals)
svals,sidx=hs.isort(fvals)
newexp=hs.reordersamples(exp,sidx)
if logit:
hs.addcommand(newexp,"sortsamples",params=params,replaceparams={'exp':exp})
newexp.filters.append('sorted samples by field %s' % field)
return newexp
def sortbyfreq(expdat,field=False,value=False,exact=False,exclude=False,logscale=True,useabs=False,reverse=False):
"""
sort bacteria in experiment according to frequency
sorting is performed based on a subset of samples (field/val/exact) and then
all the experiment is sorted according to them
input:
expdat : Experiment
field : string
name of the field to filter samples for freq. sorting or False for all samples
value : string
value of samples to use for the freq. sorting
exact : bool
is the value exact or partial string
exclude : bool
True to sort on all samples except the field/value ones, False to sort only on field/value samples (default=False)
logscale : bool
True (default) to use log2 transform for frequencies before mean and sorting, False to use original values
useabs : bool
True to sort by absolute value of freq, False (default) to sort by freq
reverse: bool
False (default) to have high freq. bacteria last, True to have high freq bacteria first
output:
newexp : Experiment
the experiment with bacteria sorted according to subgroup freq.
"""
params=locals()
if field:
texp=hs.filtersamples(expdat,field,value,exact=exact,exclude=exclude)
else:
texp=hs.copyexp(expdat)
if logscale:
texp.data[texp.data<2]=2
texp.data=np.log2(texp.data)
if useabs:
meanvals=hs.mean(np.abs(texp.data),axis=1)
else:
meanvals=hs.mean(texp.data,axis=1)
svals,sidx=hs.isort(meanvals)
if reverse:
sidx=sidx[::-1]
newexp=hs.reorderbacteria(expdat,sidx)
newexp.filters.append("sort by freq field=%s value=%s" % (field,value))
hs.addcommand(newexp,"sortbyfreq",params=params,replaceparams={'expdat':expdat})
return newexp
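# Illustrative usage (hedged): sorting by frequency of a sample subset; 'treatment'
# and 'control' are made-up metadata names and expdat is assumed to be an already
# loaded heatsequer Experiment.
#   sorted_exp = hs.sortbyfreq(expdat, field='treatment', value='control')
#   sorted_exp = hs.sortbyfreq(expdat, reverse=True)   # all samples, high freq. bacteria first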
def sortbyvariance(expdat,field=False,value=False,exact=False,norm=False):
"""
sort bacteria by their variance
sorting is performed based on a subset of samples (field/val/exact) and then
all the experiment is sorted according to them
input:
expdat : Experiment
field : string
name of the field to filter samples for freq. sorting or False for all samples
value : string
value of samples to use for the freq. sorting
exact : bool
is the value exact or partial string
norm : bool
		- False to sort by variance, True to sort by variance/mean
output:
newexp : Experiment
the experiment with bacteria sorted according to subgroup freq.
"""
params=locals()
if field:
texp=hs.filtersamples(expdat,field,value,exact=exact)
else:
texp=copy.deepcopy(expdat)
svals=np.std(texp.data,axis=1)
if norm:
svals=svals/np.mean(texp.data,axis=1)
svals,sidx=hs.isort(svals)
newexp=hs.reorderbacteria(expdat,sidx)
newexp.filters.append("sort by variance field=%s value=%s normalize=%s" % (field,value,norm))
hs.addcommand(newexp,"sortbyvariance",params=params,replaceparams={'expdat':expdat})
return newexp
def sortbygroupdiff(expdat,field,val1,val2):
"""
sort bacteria in the experiment by the difference in the mean between the 2 groups (val1,val2 in field)
input:
expdat
field - the name of the field for the 2 groups
val1,val2 - the values for the 2 groups
output:
newexp - the experiment with sorted bacteria
"""
params=locals()
exp1=hs.filtersamples(expdat,field,val1,exact=True)
exp2=hs.filtersamples(expdat,field,val2,exact=True)
m1=np.mean(np.log2(exp1.data+2),axis=1)
m2=np.mean(np.log2(exp2.data+2),axis=1)
diff=(m1-m2)/(m1+m2+20)
sv,si=hs.isort(diff)
newexp=hs.reorderbacteria(expdat,si)
newexp.filters.append("sort by group difference field=%s val1=%s val2=%s" % (field,val1,val2))
hs.addcommand(newexp,"sortbygroupdiff",params=params,replaceparams={'expdat':expdat})
return newexp
def sortcorrelation(expdat,method='all'):
"""
EXPERIMENTAL
sort bacteria according to highest correlation/anti-correlation
input:
expdat
method:
		pres - use correlation only on samples where present in one of the two sequences
all - use correlation on all samples (default)
output:
newexp - the experiment with bacteria sorted by correlation (each time next bacteria the most abs(corr) to the current bacteria)
"""
params=locals()
cdat=copy.copy(expdat.data)
cdat[cdat<=2]=2
cdat=np.log2(cdat)
cdat=scale(cdat,axis=1,copy=False,with_mean=False)
hs.Debug(6,"Calculating correlation matrix")
cmat=np.corrcoef(cdat)
hs.Debug(6,"sorting bacteria")
cmat=np.abs(cmat)
cmat-=np.identity(len(expdat.seqs))
maxpos=np.argmax(cmat)
maxpos=np.unravel_index(maxpos,np.shape(cmat))
order=[maxpos[0]]
ubact=np.arange(len(expdat.seqs))
ubact=np.delete(ubact,maxpos[0])
maxpos=maxpos[0]
while len(ubact)>0:
cdat=cmat[ubact,maxpos]
cdat=cdat.flatten()
maxpos=np.argmax(cdat)
order.append(ubact[maxpos])
ubact=np.delete(ubact,maxpos)
newexp=hs.reorderbacteria(expdat,order)
newexp.filters.append("correlation sort")
hs.addcommand(newexp,"sortcorrelation",params=params,replaceparams={'expdat':expdat})
return newexp
############
# add sort by center of mass (for time/1d series)
###########
def sortbycentermass(expdat,field=False,numeric=True,uselog=True):
"""
sort bacteria in the experiment according to a 1d gradient by calculating the center of mass
input:
expdat
field : string
the name of the field to sort by or False to skip sorting
numeric : bool
True if the sort field is numeric (ignored if no sort field)
uselog : bool
True to log transform the data before mass center calculation
output:
newexp - the experiment with sorted bacteria
"""
params=locals()
if field:
newexp=hs.sortsamples(expdat,field,numeric=numeric)
else:
newexp=hs.copyexp(expdat)
dat=newexp.data
if uselog:
dat[dat<1]=1
dat=np.log2(dat)
cm=[]
multpos=np.arange(len(newexp.samples))
for cseqind in range(len(newexp.seqs)):
cm.append(np.dot(dat[cseqind,:],multpos)/np.sum(dat[cseqind,:]))
sv,si=hs.isort(cm)
newexp=hs.reorderbacteria(expdat,si)
newexp.filters.append("sort by center of mass field=%s, uselog=%s" % (field,uselog))
hs.addcommand(newexp,"sortbycentermass",params=params,replaceparams={'expdat':expdat})
return newexp
def sortbysign(expdat,field=False,value='',exclude=False,exact=True,maxfval=0.2):
"""
sort bacteria in the experiment based on the number of positive/negative samples
(ignoring nans)
input:
expdat : Experiment
field,value,exclude,exact : name of field and value of field in order to sort based only on these samples
or field=False for all samples (default)
maxfval - the maximal f-value
output:
newexp : Experiment
sorted by difference between positive/negative
"""
params=locals()
if field:
texp=hs.filtersamples(expdat,field,value,exact=exact,exclude=exclude)
else:
texp=hs.copyexp(expdat)
texp.data=np.sign(texp.data)
numpos=np.nansum(texp.data>0,axis=1)
numneg=np.nansum(texp.data<0,axis=1)
pval=np.ones(len(numpos))
for cpos in range(len(pval)):
if numpos[cpos]==0 and numneg[cpos]==0:
continue
pval1=stats.binom.cdf(numpos[cpos],numpos[cpos]+numneg[cpos],0.5)
pval2=stats.binom.cdf(numneg[cpos],numpos[cpos]+numneg[cpos],0.5)
pval[cpos]=np.min([pval1,pval2])
signs=np.nanmean(texp.data,axis=1)
fval=hs.fdr(pval)
keep=np.where(np.array(fval)<=maxfval)[0]
newexp=hs.reorderbacteria(expdat,keep)
signs=signs[keep]
si=np.argsort(signs)
newexp=hs.reorderbacteria(newexp,si)
newexp.filters.append("sort by sign field %s max-f-val %f" % (field,maxfval))
hs.addcommand(newexp,"sortbysign",params=params,replaceparams={'expdat':expdat})
return newexp
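# Illustrative note (hedged) on the statistics above: the p-value is the smaller tail
# of a binomial sign test, e.g. with 9 positive and 1 negative sample
#   from scipy import stats
#   p = min(stats.binom.cdf(9, 10, 0.5), stats.binom.cdf(1, 10, 0.5))   # ~0.0107
# and these p-values are FDR-corrected across bacteria before filtering by maxfval.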
def sortbyseqsfirst(expdat,seqs,addline=True):
"""
sort bacteria in expdat by first putting the bacteria from seqs (according to the order there)
and then all the other expdat bacteria
input:
expdat : Experiment
The experiment to sort the bacteria in
seqs : list of squences ('ACGT')
the bacteria to order first
addline : bool
True (default) to add a horizontal line to the plot info. False to not add
output:
newexp : Experiment
similar to expdat but bacteria in seqs appearing first
"""
params=locals()
newseqs=copy.copy(seqs)
for cseq in expdat.seqs:
if cseq not in seqs:
newseqs.append(cseq)
newexp=hs.filterseqs(expdat,newseqs)
if addline:
newexp.hlines.append(len(seqs))
newexp.filters.append("sort by sequence list first based on %d sequences" % len(seqs))
hs.addcommand(newexp,"sortbyseqsfirst",params=params,replaceparams={'expdat':expdat})
return newexp
def reversebacteria(expdat):
"""
reverse the order of bacteria in the experiment
input:
expdat : Experiment
the experiment to reorder
	output:
newexp : Experiment
with bacteria order reversed (last bacteria first)
"""
params=locals()
newexp=hs.reorderbacteria(expdat,np.arange(len(expdat.seqs)-1,-1,-1))
newpos=[]
for linepos in newexp.hlines:
newpos.append(len(expdat.seqs)-linepos)
newexp.hlines=newpos
newexp.filters.append("reverse bacteria order")
hs.addcommand(newexp,"reversebacteria",params=params,replaceparams={'expdat':expdat})
return newexp
def sortbyexp(expdat,sortexp):
"""
sort the bacteria in expdat by first putting the bacteria in sortexp (in the order there) and then the other bacteria in expdat
input:
expdat : Experiment
the experiment to sort
sortexp : Experiment
the experiment used to sort the bacteria first
output:
newexp : experiment
sorted with sortexp bacteria first, then the others
"""
params=locals()
hs.Debug(2,'sort by exp')
numfromseqs=0
seqs=[]
for cseq in sortexp.seqs:
if cseq in expdat.seqdict:
seqs.append(cseq)
numfromseqs+=1
for cseq in expdat.seqs:
if cseq not in sortexp.seqdict:
seqs.append(cseq)
newexp=hs.filterseqs(expdat,seqs)
newexp.hlines.append(len(sortexp.seqs))
hs.Debug(6,'found %d out of %d sequences and put them first' % (numfromseqs,len(sortexp.seqs)))
newexp.filters.append("sort bacteria using experiment %s" % sortexp.studyname)
hs.addcommand(newexp,"sortbyexp",params=params,replaceparams={'expdat':expdat,'sortexp':sortexp})
return newexp
def sortsamplesbybactfreq(expdat,seq):
"""
sort samples based on the frequency of sequence seq
input:
expdat : Experiment
seq : str (ACGT)
output:
newexp : Experiment
with samples sorted based on frequency of sequence seq (from low to high)
"""
params=locals()
seqdat=expdat.data[expdat.seqdict[seq],:]
si=np.argsort(seqdat)
newexp=hs.reordersamples(expdat,si)
newexp.filters.append("sort samples by bacteria frequency")
hs.addcommand(newexp,"sortsamplesbybactfreq",params=params,replaceparams={'expdat':expdat})
return newexp
def sortbyprev(expdat,minreads=1,reverse=False):
"""
sort bacteria in experiment according to prevalence
expdat : Experiment
minreads : float
minimal number of reads in order to call a bacteria present
	reverse : bool
True to reverse the order
output:
newexp : Experiment
the experiment with bacteria sorted according to subgroup freq.
"""
params=locals()
texp=hs.copyexp(expdat)
texp.data= texp.data>=minreads
meanvals=hs.mean(texp.data,axis=1)
svals,sidx=hs.isort(meanvals)
if reverse:
sidx=sidx[::-1]
newexp=hs.reorderbacteria(expdat,sidx)
newexp.filters.append("sort by prevalence")
hs.addcommand(newexp,"sortbyprev",params=params,replaceparams={'expdat':expdat})
return newexp
| bsd-3-clause |
barney-NG/pyCAMTracker | src/filterpy/kalman/tests/test_fm.py | 2 | 2152 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy.random as random
import numpy as np
import matplotlib.pyplot as plt
from filterpy.kalman import FadingKalmanFilter
DO_PLOT = False
def test_noisy_1d():
f = FadingKalmanFilter (5., dim_x=2, dim_z=1)
f.X = np.array([[2.],
[0.]]) # initial state (location and velocity)
f.F = np.array([[1.,1.],
[0.,1.]]) # state transition matrix
f.H = np.array([[1.,0.]]) # Measurement function
f.P *= 1000. # covariance matrix
f.R = 5 # state uncertainty
f.Q = 0.0001 # process uncertainty
measurements = []
results = []
zs = []
for t in range (100):
# create measurement = t plus white noise
z = t + random.randn()*20
zs.append(z)
# perform kalman filtering
f.update(z)
f.predict()
# save data
results.append (f.X[0,0])
measurements.append(z)
# now do a batch run with the stored z values so we can test that
# it is working the same as the recursive implementation.
# give slightly different P so result is slightly different
f.X = np.array([[2.,0]]).T
f.P = np.eye(2)*100.
m,c,_,_ = f.batch_filter(zs,update_first=False)
# plot data
if DO_PLOT:
p1, = plt.plot(measurements,'r', alpha=0.5)
p2, = plt.plot (results,'b')
p4, = plt.plot(m[:,0], 'm')
p3, = plt.plot ([0,100],[0,100], 'g') # perfect result
plt.legend([p1,p2, p3, p4],
["noisy measurement", "KF output", "ideal", "batch"], loc=4)
plt.show()
if __name__ == "__main__":
DO_PLOT = True
test_noisy_1d() | mit |
bnaul/scikit-learn | sklearn/metrics/_plot/tests/test_plot_confusion_matrix.py | 3 | 11535 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_array_equal
from sklearn.compose import make_column_transformer
from sklearn.datasets import make_classification
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, SVR
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved
pytestmark = pytest.mark.filterwarnings(
"ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:"
"matplotlib.*")
@pytest.fixture(scope="module")
def n_classes():
return 5
@pytest.fixture(scope="module")
def data(n_classes):
X, y = make_classification(n_samples=100, n_informative=5,
n_classes=n_classes, random_state=0)
return X, y
@pytest.fixture(scope="module")
def fitted_clf(data):
return SVC(kernel='linear', C=0.01).fit(*data)
@pytest.fixture(scope="module")
def y_pred(data, fitted_clf):
X, _ = data
return fitted_clf.predict(X)
def test_error_on_regressor(pyplot, data):
X, y = data
est = SVR().fit(X, y)
msg = "plot_confusion_matrix only supports classifiers"
with pytest.raises(ValueError, match=msg):
plot_confusion_matrix(est, X, y)
def test_error_on_invalid_option(pyplot, fitted_clf, data):
X, y = data
msg = (r"normalize must be one of \{'true', 'pred', 'all', "
r"None\}")
with pytest.raises(ValueError, match=msg):
plot_confusion_matrix(fitted_clf, X, y, normalize='invalid')
@pytest.mark.parametrize("with_labels", [True, False])
@pytest.mark.parametrize("with_display_labels", [True, False])
def test_plot_confusion_matrix_custom_labels(pyplot, data, y_pred, fitted_clf,
n_classes, with_labels,
with_display_labels):
X, y = data
ax = pyplot.gca()
labels = [2, 1, 0, 3, 4] if with_labels else None
display_labels = ['b', 'd', 'a', 'e', 'f'] if with_display_labels else None
cm = confusion_matrix(y, y_pred, labels=labels)
disp = plot_confusion_matrix(fitted_clf, X, y,
ax=ax, display_labels=display_labels,
labels=labels)
assert_allclose(disp.confusion_matrix, cm)
if with_display_labels:
expected_display_labels = display_labels
elif with_labels:
expected_display_labels = labels
else:
expected_display_labels = list(range(n_classes))
expected_display_labels_str = [str(name)
for name in expected_display_labels]
x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
assert_array_equal(disp.display_labels, expected_display_labels)
assert_array_equal(x_ticks, expected_display_labels_str)
assert_array_equal(y_ticks, expected_display_labels_str)
@pytest.mark.parametrize("normalize", ['true', 'pred', 'all', None])
@pytest.mark.parametrize("include_values", [True, False])
def test_plot_confusion_matrix(pyplot, data, y_pred, n_classes, fitted_clf,
normalize, include_values):
X, y = data
ax = pyplot.gca()
cmap = 'plasma'
cm = confusion_matrix(y, y_pred)
disp = plot_confusion_matrix(fitted_clf, X, y,
normalize=normalize,
cmap=cmap, ax=ax,
include_values=include_values)
assert disp.ax_ == ax
if normalize == 'true':
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == 'pred':
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == 'all':
cm = cm / cm.sum()
assert_allclose(disp.confusion_matrix, cm)
import matplotlib as mpl
assert isinstance(disp.im_, mpl.image.AxesImage)
assert disp.im_.get_cmap().name == cmap
assert isinstance(disp.ax_, pyplot.Axes)
assert isinstance(disp.figure_, pyplot.Figure)
assert disp.ax_.get_ylabel() == "True label"
assert disp.ax_.get_xlabel() == "Predicted label"
x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
expected_display_labels = list(range(n_classes))
expected_display_labels_str = [str(name)
for name in expected_display_labels]
assert_array_equal(disp.display_labels, expected_display_labels)
assert_array_equal(x_ticks, expected_display_labels_str)
assert_array_equal(y_ticks, expected_display_labels_str)
image_data = disp.im_.get_array().data
assert_allclose(image_data, cm)
if include_values:
assert disp.text_.shape == (n_classes, n_classes)
fmt = '.2g'
expected_text = np.array([format(v, fmt) for v in cm.ravel(order="C")])
text_text = np.array([
t.get_text() for t in disp.text_.ravel(order="C")])
assert_array_equal(expected_text, text_text)
else:
assert disp.text_ is None
def test_confusion_matrix_display(pyplot, data, fitted_clf, y_pred, n_classes):
X, y = data
cm = confusion_matrix(y, y_pred)
disp = plot_confusion_matrix(fitted_clf, X, y, normalize=None,
include_values=True, cmap='viridis',
xticks_rotation=45.0)
assert_allclose(disp.confusion_matrix, cm)
assert disp.text_.shape == (n_classes, n_classes)
rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
assert_allclose(rotations, 45.0)
image_data = disp.im_.get_array().data
assert_allclose(image_data, cm)
disp.plot(cmap='plasma')
assert disp.im_.get_cmap().name == 'plasma'
disp.plot(include_values=False)
assert disp.text_ is None
disp.plot(xticks_rotation=90.0)
rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
assert_allclose(rotations, 90.0)
disp.plot(values_format='e')
expected_text = np.array([format(v, 'e') for v in cm.ravel(order="C")])
text_text = np.array([
t.get_text() for t in disp.text_.ravel(order="C")])
assert_array_equal(expected_text, text_text)
def test_confusion_matrix_contrast(pyplot):
# make sure text color is appropriate depending on background
cm = np.eye(2) / 2
disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
disp.plot(cmap=pyplot.cm.gray)
# diagonal text is black
assert_allclose(disp.text_[0, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
assert_allclose(disp.text_[1, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
# off-diagonal text is white
assert_allclose(disp.text_[0, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
assert_allclose(disp.text_[1, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
disp.plot(cmap=pyplot.cm.gray_r)
# diagonal text is white
assert_allclose(disp.text_[0, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
assert_allclose(disp.text_[1, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
# off-diagonal text is black
assert_allclose(disp.text_[0, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
assert_allclose(disp.text_[1, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
# Regression test for #15920
cm = np.array([[19, 34], [32, 58]])
disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
disp.plot(cmap=pyplot.cm.Blues)
min_color = pyplot.cm.Blues(0)
max_color = pyplot.cm.Blues(255)
assert_allclose(disp.text_[0, 0].get_color(), max_color)
assert_allclose(disp.text_[0, 1].get_color(), max_color)
assert_allclose(disp.text_[1, 0].get_color(), max_color)
assert_allclose(disp.text_[1, 1].get_color(), min_color)
@pytest.mark.parametrize(
"clf", [LogisticRegression(),
make_pipeline(StandardScaler(), LogisticRegression()),
make_pipeline(make_column_transformer((StandardScaler(), [0, 1])),
LogisticRegression())])
def test_confusion_matrix_pipeline(pyplot, clf, data, n_classes):
X, y = data
with pytest.raises(NotFittedError):
plot_confusion_matrix(clf, X, y)
clf.fit(X, y)
y_pred = clf.predict(X)
disp = plot_confusion_matrix(clf, X, y)
cm = confusion_matrix(y, y_pred)
assert_allclose(disp.confusion_matrix, cm)
assert disp.text_.shape == (n_classes, n_classes)
@pytest.mark.parametrize("colorbar", [True, False])
def test_plot_confusion_matrix_colorbar(pyplot, data, fitted_clf, colorbar):
X, y = data
def _check_colorbar(disp, has_colorbar):
if has_colorbar:
assert disp.im_.colorbar is not None
assert disp.im_.colorbar.__class__.__name__ == "Colorbar"
else:
assert disp.im_.colorbar is None
disp = plot_confusion_matrix(fitted_clf, X, y, colorbar=colorbar)
_check_colorbar(disp, colorbar)
# attempt a plot with the opposite effect of colorbar
disp.plot(colorbar=not colorbar)
_check_colorbar(disp, not colorbar)
@pytest.mark.parametrize("values_format", ['e', 'n'])
def test_confusion_matrix_text_format(pyplot, data, y_pred, n_classes,
fitted_clf, values_format):
# Make sure plot text is formatted with 'values_format'.
X, y = data
cm = confusion_matrix(y, y_pred)
disp = plot_confusion_matrix(fitted_clf, X, y,
include_values=True,
values_format=values_format)
assert disp.text_.shape == (n_classes, n_classes)
expected_text = np.array([format(v, values_format)
for v in cm.ravel()])
text_text = np.array([
t.get_text() for t in disp.text_.ravel()])
assert_array_equal(expected_text, text_text)
def test_confusion_matrix_standard_format(pyplot):
cm = np.array([[10000000, 0], [123456, 12345678]])
plotted_text = ConfusionMatrixDisplay(
cm, display_labels=[False, True]).plot().text_
# Values should be shown as whole numbers 'd',
# except the first number which should be shown as 1e+07 (longer length)
# and the last number will be shown as 1.2e+07 (longer length)
test = [t.get_text() for t in plotted_text.ravel()]
assert test == ['1e+07', '0', '123456', '1.2e+07']
cm = np.array([[0.1, 10], [100, 0.525]])
plotted_text = ConfusionMatrixDisplay(
cm, display_labels=[False, True]).plot().text_
    # Values should now be formatted as '.2g', since there's a float in cm.
    # Values keep at most two significant digits (e.g. 100 becomes 1e+02).
test = [t.get_text() for t in plotted_text.ravel()]
assert test == ['0.1', '10', '1e+02', '0.53']
@pytest.mark.parametrize("display_labels, expected_labels", [
(None, ["0", "1"]),
(["cat", "dog"], ["cat", "dog"]),
])
def test_default_labels(pyplot, display_labels, expected_labels):
cm = np.array([[10, 0], [12, 120]])
disp = ConfusionMatrixDisplay(cm, display_labels=display_labels).plot()
x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
assert_array_equal(x_ticks, expected_labels)
assert_array_equal(y_ticks, expected_labels)
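# Illustrative recap (hedged) of the two entry points exercised by this test module,
# reusing the fixture names defined above (fitted_clf, X, y, y_pred):
#   disp = plot_confusion_matrix(fitted_clf, X, y, normalize='true', cmap='Blues')
#   disp = ConfusionMatrixDisplay(confusion_matrix(y, y_pred),
#                                 display_labels=list(range(5))).plot(values_format='d')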
| bsd-3-clause |
kuntzer/binfind | scripts/dang_stars_2_plot.py | 1 | 6564 | import numpy as np
from matplotlib import pyplot as plt
import itertools
import os
import binfind.utils as u
import figures
white_plot = False
if white_plot:
# Include this for presentations:
import matplotlib.font_manager as fm
from matplotlib import rc
prop = fm.FontProperties(fname='/usr/share/texmf/fonts/opentype/public/tex-gyre/texgyreadventor-regular.otf')
#rc('font', **{'fname':'/usr/share/texmf/fonts/opentype/public/tex-gyre/texgyreadventor-regular.otf'})
rc('font', **{'family':'TeX Gyre Adventor','size':14})
#### End
else:
figures.set_fancy(txtsize=18)
reload_data = False
e_req = 0.01#2e-4
r_req = 0.05#1e-3
n_angsep = 15
n_con = 15
# Minimum separation of the stars to be qualified as binaries
crits_angsep = np.linspace(1, 15, n_angsep)
# Max contrast to be qualified as binaries
crits_contrast = np.linspace(0.1, 1.5, n_con)#np.linspace(0.1, 0.015, 7)
save = True
outdir = 'data/binfind_percent_meas/dang_stars'
e1_deforms = []
e2_deforms = []
r2_deforms = []
if reload_data:
criteria = list(itertools.product(*[crits_angsep, crits_contrast]))
for iix, (crit_angsep, crit_contrast) in enumerate(criteria):
ca, cc, e1_deform, e2_deform, r2_deform = u.readpickle(os.path.join(outdir, 'dang_stars_{:d}_{:1.1f}.pkl'.format(int(crit_angsep), crit_contrast)))
e1_deforms.append(np.percentile(e1_deform, [95])[0])
e2_deforms.append(np.percentile(e2_deform, [95])[0])
r2_deforms.append(np.percentile(r2_deform, [95])[0])
print iix, crit_angsep, crit_contrast, e1_deforms[-1], e2_deforms[-1], r2_deforms[-1]
#
e1_deforms = np.asarray(e1_deforms)
e2_deforms = np.asarray(e2_deforms)
r2_deforms = np.asarray(r2_deforms)
e1_deform = e1_deforms.reshape([n_angsep, n_con])
e2_deform = e2_deforms.reshape([n_angsep, n_con])
r2_deform = r2_deforms.reshape([n_angsep, n_con])
u.writepickle([e1_deform, e2_deform, r2_deform], os.path.join(outdir, 'resume_dang_stars.pkl'))
else:
e1_deform, e2_deform, r2_deform = u.readpickle(os.path.join(outdir, 'resume_dang_stars.pkl'))
# Let's start by constructing the meshgrid and then the data variable
x = crits_angsep
y = crits_contrast
dx = (x[1]-x[0])/2.
dy = (y[1]-y[0])/2.
x = np.hstack([x, x[-1] + 2.*dx])
y = np.hstack([y, y[-1] + 2.*dy])
X, Y = np.meshgrid(x-dx,y-dy)
fig1 = plt.figure()
# Round up to the nearest 0.05
#vmin = np.round(Zd.min() * 20) / 20
#vmax = np.round(Zd.max() * 20) / 20
CS = plt.pcolormesh(X, Y, e1_deform.T, cmap=plt.get_cmap("inferno_r"))#, vmin=vmin, vmax=vmax)
#plt.axis([X.min(),X.max(),Y.min(),Y.max()])
#plt.xticks(x[:-1][::2])
#plt.yticks(y[:-1])
cbar = plt.colorbar(CS)#, ticks=np.linspace(0,1,21))
cbar.set_label(r"$\Delta e_1$")
plt.xlabel(r"$\mathrm{Angular\ separation\ [mas]}$")
plt.ylabel(r"$\mathrm{Contrast\ [mag]}$")
plt.axis([X.min(),X.max(),Y.min(),Y.max()])
fig2 = plt.figure()
# Round up to the nearest 0.05
#vmin = np.round(Zd.min() * 20) / 20
#vmax = np.round(Zd.max() * 20) / 20
CS = plt.pcolormesh(X, Y, e2_deform.T, cmap=plt.get_cmap("inferno_r"))#, vmin=vmin, vmax=vmax)
#plt.axis([X.min(),X.max(),Y.min(),Y.max()])
#plt.xticks(x[:-1][::2])
#plt.yticks(y[:-1])
cbar = plt.colorbar(CS)#, ticks=np.linspace(0,1,21))
cbar.set_label(r"$\Delta e_2$")
plt.xlabel(r"$\mathrm{Angular\ separation\ [mas]}$")
plt.ylabel(r"$\mathrm{Contrast\ [mag]}$")
plt.axis([X.min(),X.max(),Y.min(),Y.max()])
fig3 = plt.figure()
# Round up to the nearest 0.05
#vmin = np.round(Zd.min() * 20) / 20
#vmax = np.round(Zd.max() * 20) / 20
CS = plt.pcolormesh(X, Y, r2_deform.T, cmap=plt.get_cmap("inferno_r"))#, vmin=vmin, vmax=vmax)
#plt.axis([X.min(),X.max(),Y.min(),Y.max()])
#plt.xticks(x[:-1][::2])
#plt.yticks(y[:-1])
cbar = plt.colorbar(CS)#, ticks=np.linspace(0,1,21))
cbar.set_label(r"$\Delta R^2/R^2$")
plt.xlabel(r"$\mathrm{Angular\ separation\ [mas]}$")
plt.ylabel(r"$\mathrm{Contrast\ [mag]}$")
plt.axis([X.min(),X.max(),Y.min(),Y.max()])
fig4 = plt.figure(figsize=(8.8,7.2))
ax = plt.subplot(111)
# Round up to the nearest 0.05
#vmin = np.round(Zd.min() * 20) / 20
#vmax = np.round(Zd.max() * 20) / 20
mean_budg = (r2_deform.T / r_req + (e1_deform + e2_deform).T / e_req)/3. * 100
mean_budg = ((e1_deform + e2_deform).T)/2. * 1e2
r_tab = r2_deform.T * 1e2
CS1 = plt.pcolormesh(X, Y, r2_deform.T / r_req, cmap=plt.get_cmap("inferno_r"))
vmaxe = np.round(mean_budg.max() / .10, 0) * .10
vmine = np.round(mean_budg.min() / .10, 0) * .10
tt = 1e3
vmaxr = np.round(r_tab.max() * tt, 0) / tt
vminr = np.floor( np.round(r_tab.min() * tt, 0) / tt * 1e2) / 1e2
print vmine
print vminr, vmaxr
print r2_deform.min(), r2_deform.max()
#CS1 = plt.pcolormesh(X, Y, r_tab, cmap=plt.get_cmap("inferno_r"), vmin=vminr, vmax=vmaxr)
CS = plt.pcolormesh(X, Y, mean_budg, cmap=plt.get_cmap("inferno_r"), vmin=vmine, vmax=vmaxe)
plt.xticks(x[:-1][::2])
plt.yticks(y[:-1][::2])
import matplotlib.ticker as ticker
tickse = np.arange(vmine, vmaxe, .5)
ticksr = np.linspace(vminr, vmaxr, 5)
cbar = plt.colorbar(CS, ticks=tickse, pad=0.01)
if white_plot:
commonticks = ["%1.1f%; %1.2f%$" % (tickse[ii], ticksr[ii]) for ii in range(len(tickse))]
else:
commonticks = [r"$\smallskip%1.1f\%%$" % tickse[ii] + "\n" + r"$%1.2f\%%$" % (ticksr[ii]) for ii in range(len(tickse))]
cbar.ax.set_yticklabels(commonticks, ha = 'left')
if white_plot:
cbar.set_label("$\Delta$e/e_0;\,\Delta R^2/R_0^2$", color="white")
plt.xlabel("Maximum angular separation [mas]")
plt.ylabel("Minimum contrast [mag]")
else:
cbar.set_label(r"$\langle\Delta e_i/e_{0,i}\rangle;\,\langle\Delta R^2/R^2_{0}\rangle$")
plt.xlabel(r"$\mathrm{Angular\ separation\ [mas]}$")
plt.ylabel(r"$\mathrm{Contrast\ [mag]}$")
plt.axis([X.min(),X.max(),Y.min(),Y.max()])
if white_plot:
[ ax.spines[s].set_color('white') for s in ax.spines]
ax.xaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='y', colors='white')
#cbar.outline.set_color('w') #set colorbar box color
cbar.ax.yaxis.set_tick_params(color='w') #set colorbar ticks color
cbytick_obj = plt.getp(cbar.ax.axes, 'yticklabels') #tricky
plt.setp(cbytick_obj, color='w')
#cbar.outline.set_color('w')
if save:
figures.savefig(os.path.join(outdir, "dang_e1"), fig1, fancy=True, pdf_transparence=True)
figures.savefig(os.path.join(outdir, "dang_e2"), fig2, fancy=True, pdf_transparence=True)
figures.savefig(os.path.join(outdir, "dang_r2"), fig3, fancy=True, pdf_transparence=True)
figures.savefig(os.path.join(outdir, "dang_summary"), fig4, fancy=True, pdf_transparence=True)
plt.show()
| mit |
tomsib2001/speaker-recognition | main_kp.py | 1 | 6821 | # -*- coding: utf-8 -*-
# global processing of the wav files
import os,numpy as np,octaveIO as oio,string,subprocess
import fisher
import random
from sklearn import mixture
from kernel_perceptron import kp
import pickle
def gmm(x, nbG):
g=mixture.GMM(n_components=nbG)
g.fit(x)
return g
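# Illustrative usage (hedged): fitting the helper on fake MFCC-like frames; the shapes
# are arbitrary and the .means/.weights attributes follow the old sklearn.mixture.GMM
# API already used in this script.
#   fake_frames = np.random.randn(200, 12)   # 200 frames x 12 cepstral coefficients
#   g = gmm(fake_frames, 8)
#   print g.means.shape, g.weights.shape     # (8, 12) and (8,)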
def createDataFiles(nbc, nbG):
if not os.path.exists('data'):
os.makedirs('data')
print "Please add some data, I don't work for free"
else:
		mfccs=[] # will hold the full set of mfccs
dic={}
mu=[]
pi=[]
num=0
for root, dirs, files in os.walk('data'):
#print root,dirs,files
for file in files:
if file.endswith(".wav"):
# print "treating file "+file
nameInDic=os.path.split(root)[-1]
# print "-> "+nameInDic
name=os.path.splitext(file)[0]
fileName = os.path.join(root, name)
wavFile = fileName+'.wav'
mfccFile = fileName+'mfcc.mat' #contient 'c'
#print string.join(['octave','--eval','cepstraux('+'\''+wavFile+'\',\''+mfccFile+'\')'])
subprocess.call(['octave', '--silent', '--eval','cepstraux('+'\''+wavFile+'\',\''+mfccFile+'\')'])
c=oio.retrieve(mfccFile,['c'])[0]
l=np.size(c, 0) #nombre total de frames
nbv=l/nbc
if(not dic.has_key(nameInDic)):
dic[nameInDic]=[]
for j in xrange(nbv):
binf=j*nbc
bsup=(j+1)*nbc
						newvf=c[binf:bsup] # block of frames to add
						mfccs.append(newvf) # add it to the global set
						# compute the GMMs
g = gmm(newvf, nbG)
mu_j = g.means
pi_j = g.weights
mu.append(mu_j)
pi.append(pi_j)
dic[nameInDic].append(num)
num+=1
return mfccs, mu, pi, dic
def gmms(data, nbG):
c0 = np.concatenate(data)
g=gmm(c0, nbG)
mu0=g.means
sig0=g.covars
return mu0, sig0
def build_labels(name,dic):
size = sum(len(val) for val in dic.itervalues())
res = np.zeros(size)
for key in dic:
if key==name:
val=1
else:
val=-1
for i in dic[key]:
res[i]=val
return res
def make_training_set(name,dic,m):
"""Take m random adversaries to help train <name>"""
nameKeys=dic[name]
advKeys=[dic[key] for key in dic if key<>name]
advs = random.sample(advKeys,min(len(advKeys),m))
return {name : nameKeys, '#mechant' : advs}
#r1: fraction of name's data used for training
#r2*|{name's training data}|: number of samples outside name used for training
#the rest is kept for testing
#leaveOut is a list of speaker names to exclude from training, if any
def make_sets(name, dic, r1, r2,leaveOut=[]):
nameKeys=dic[name]
advKeys_=np.concatenate([dic[key] for key in dic if (key<>name and key not in leaveOut)])
if leaveOut:
advKeysLeaveOut_ = np.concatenate([dic[key] for key in dic if (key<>name and key in leaveOut)])
advKeysLeaveOut=advKeysLeaveOut_.tolist()
else:
advKeysLeaveOut = []
advKeys=advKeys_.tolist()
l1=len(nameKeys)
lp1=int(r1*l1)
l2=len(advKeys)
lp2=min(int(r2*lp1), l2)
advs=random.sample(advKeys,lp2)
for i in advs:
advKeys.remove(i)
	testLengthAdv = l2/2 # only test on half of what is left, so that a "blind" test can be run on a third set, xTest2
testLengthName = l1/2
xApp=nameKeys[0:lp1]+advs
xTest=nameKeys[lp1:testLengthName]+advKeys[0:testLengthAdv]
xTestPlus=testLengthName-lp1
xTestMinus=testLengthAdv
xTest2 = nameKeys[(testLengthName+1):l1]+advKeys[(testLengthAdv+1):]+advKeysLeaveOut
xTest2Plus=(l1-testLengthName-1)
xTest2Minus=(len(advKeys)-1 -testLengthAdv)+len(advKeysLeaveOut)
yApp=[1 for i in xrange(lp1)]+[-1 for i in xrange(lp2)]
yTest=[1 for i in xrange(xTestPlus)]+[-1 for i in xrange(xTestMinus)]
yTest2=[1 for i in xrange(xTest2Plus)]+[-1 for i in xrange(xTest2Minus)]
return xApp, yApp, xTest, yTest,xTest2,yTest2
def train(name,mu0,sig0,mu,pi, xApp, yApp, verbose=False, stop=100000):
#print('learning ' + name)
# y = build_labels(name,dic)
# x=range(len(mu))
#xApp, yApp, xTest, yTest = make_sets(name, dic, r1, r2)
def k(i,j):
res = fisher.K(xApp,i,j,mu[i],mu[j],sig0,pi[i],pi[j],mu0)
return res
return kp(xApp, yApp, k, verbose=verbose, stop=stop)
def predKP(w, b, mu, pi, mu0, sig0, xTest):
# print 'prediction...'
yPred=[]
T=len(w)
for i in xTest:
		#compute <w, i-th test point>
v=[w[j]*fisher.K(w, j, i, mu[j], mu[i], sig0, pi[j], pi[j], mu0) for j in xrange(T)]
tmp=sum(v) + b
yPred.append(tmp)
return yPred
def evalKP(yPred, yTest):
ok = 0
tot = len(yTest)
for i in xrange(tot):
ok+=(yPred[i]*yTest[i] > 0)
return ok/float(tot)
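# Illustrative check (hedged): evalKP simply counts sign agreement between scores
# and labels, e.g.
#   evalKP([0.3, -1.2, 0.8], [1, -1, -1])   # -> 2/3, only the last score has the wrong sign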
def test_make_sets(r1, r2,leaveOut=[]):
dic={'a':[1, 5, 7, 23, 18, 4, 2],
'b':[8, 9, 10],
'c':[6, 11, 12]}
# print dic
res = make_sets('a', dic, r1, r2,leaveOut)
return dic,res
#dic, res = test_make_sets(0.2, 1.2,['b'])
#print dic,res
nbc=100
# nbG=50
# name='gerra'
# r1=0.6
# r2=2.0
# mfccs, mu, pi, dic = createDataFiles(nbc, nbG)
# print name
# xApp, yApp, xOpp, yOpp, xTest, yTest = make_sets(name, dic, r1, r2)
# print 'GMM over the full set of points\n'
# mu0, sig0 = gmms([mfccs[i] for i in xApp], nbG)
# print 'start training'
# w, b, ind = train(name, mu0, sig0, mu, pi, xApp, yApp, verbose=True)
# acc = evalKP(predKP(w, b, mu, pi, mu0, sig0, xTest), yTest)
# print ind, acc
def optimisation(names, nbGs, r1, r2, stop=100000):
res={}
for nbG in nbGs:
mfccs, mu, pi, dic = createDataFiles(nbc, nbG)
pickle.dump(dic, open('data_kp'+str(nbG)+'.dat', 'wb'))
for name in names:
xApp, yApp, xOpp, yOpp, xTest, yTest = make_sets(name, dic, r1, r2)
mu0, sig0 = gmms([mfccs[i] for i in xApp], nbG)
w, b, it = train(name, mu0, sig0, mu, pi, xApp, yApp, stop=stop)
acc = evalKP(predKP(w, b, mu, pi, mu0, sig0, xTest), yTest)
res[(name, nbG)]=acc
print name, nbG, acc, it
return res
res = optimisation(['gerra', 'sarkozy', 'L4', 'thomas'], [10, 30, 40, 50, 70, 100], 0.6, 2.0, stop=300)
pickle.dump(res, open('resultats_kp.dat', 'wb'))
| mit |
degoldschmidt/pytrack-analysis | pytrack_analysis/preprocessing.py | 1 | 1182 | from scipy import signal
import numpy as np
import pandas as pd
import time
def interpolate(*args):
if len(args) > 1:
return [arg.interpolate() for arg in args]
elif len(args) == 1:
return args[0].interpolate()
else:
return None
def to_mm(_data, px2mm):
return _data * px2mm
def gaussian_filter(*args, _len=16, _sigma=1.6):
if len(args) > 1:
return [gaussian_filtered(arg, _len=_len, _sigma=_sigma) for arg in args]
elif len(args) == 1:
return gaussian_filtered(args[0], _len=_len, _sigma=_sigma)
else:
return None
def gaussian_filter_np(_X, _len=16, _sigma=1.6):
return gaussian_filtered(_X, _len=_len, _sigma=_sigma)
def gaussian_filtered(_X, _len=16, _sigma=1.6):
norm = np.sqrt(2*np.pi)*_sigma ### Scipy's gaussian window is not normalized
window = signal.gaussian(_len+1, std=_sigma)/norm
outdf = pd.DataFrame({}, index=_X.index)
for col in _X.columns:
convo = np.convolve(_X[col], window, "same")
## eliminate boundary effects
convo[:_len] = _X[col].iloc[:_len]
convo[-_len:] = _X[col].iloc[-_len:]
outdf[col] = convo
return outdf
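# Illustrative usage (hedged): smoothing a toy trajectory DataFrame; column names and
# lengths are made up for the example.
#   df = pd.DataFrame({'x': np.cumsum(np.random.randn(500)),
#                      'y': np.cumsum(np.random.randn(500))})
#   smooth = gaussian_filter(df, _len=16, _sigma=1.6)   # same index/columns, smoothed values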
| gpl-3.0 |
detrout/debian-statsmodels | examples/incomplete/dates.py | 29 | 1251 | """
Using dates with timeseries models
"""
import statsmodels.api as sm
import pandas as pd
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
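# Illustrative note (hedged): _infer_dimension_ is what backs ``n_components='mle'``
# below. On data with a clear low-rank signal it usually recovers the rank, e.g.
#   X = np.dot(np.random.randn(500, 3), np.random.randn(3, 10))   # rank-3 signal
#   X += 0.01 * np.random.randn(500, 10)                          # small isotropic noise
#   PCA(n_components='mle').fit(X).n_components_                  # typically 3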
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
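# Illustrative note (hedged): score/score_samples make PCA usable for probabilistic
# model selection, e.g. choosing n_components by held-out log-likelihood; X_train and
# X_test are placeholder names for a split of the data.
#   best_k = max(range(1, X_train.shape[1] + 1),
#                key=lambda k: PCA(n_components=k).fit(X_train).score(X_test))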
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
        from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
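# ----------------------------------------------------------------------------
# A minimal usage sketch for RandomizedPCA (not part of the library API).
# It assumes the module's own imports (randomized_svd, fast_dot, check_array,
# ...) resolve, i.e. it is run inside the scikit-learn package.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    # 100 samples living (noisily) on a 2-d subspace of a 5-d space
    latent = rng.randn(100, 2)
    X_demo = np.dot(latent, rng.randn(2, 5)) + 0.01 * rng.randn(100, 5)
    rpca = RandomizedPCA(n_components=2, random_state=0)
    X_low = rpca.fit_transform(X_demo)       # (100, 2) projection
    X_back = rpca.inverse_transform(X_low)   # approximate reconstruction
    print(rpca.explained_variance_ratio_)    # most variance in two components
    print(np.abs(X_back - X_demo).max())     # small reconstruction error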
| bsd-3-clause |
astroML/astroML | examples/datasets/plot_sdss_specgals.py | 2 | 1802 | """
SDSS Spectroscopic Galaxy Sample
--------------------------------
This figure shows photometric colors of the SDSS spectroscopic galaxy
sample.
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from astropy.visualization import hist
from astroML.datasets import fetch_sdss_specgals
data = fetch_sdss_specgals()
#------------------------------------------------------------
# plot the RA/DEC in an area-preserving projection
RA = data['ra']
DEC = data['dec']
# shift RA into the range [-180, 180) and convert both coordinates to radians
RA -= 180
RA *= np.pi / 180
DEC *= np.pi / 180
ax = plt.axes(projection='mollweide')
ax.grid()
plt.scatter(RA, DEC, s=1, lw=0, c=data['z'], cmap=plt.cm.copper,
vmin=0, vmax=0.4)
plt.title('SDSS DR8 Spectroscopic Galaxies')
cb = plt.colorbar(cax=plt.axes([0.05, 0.1, 0.9, 0.05]),
orientation='horizontal',
ticks=np.linspace(0, 0.4, 9))
cb.set_label('redshift')
#------------------------------------------------------------
# plot the r vs u-r color-magnitude diagram
u = data['modelMag_u']
r = data['modelMag_r']
rPetro = data['petroMag_r']
plt.figure()
ax = plt.axes()
plt.scatter(u - r, rPetro, s=1, lw=0, c=data['z'], cmap=plt.cm.copper,
vmin=0, vmax=0.4)
plt.colorbar(ticks=np.linspace(0, 0.4, 9)).set_label('redshift')
plt.xlim(0.5, 5.5)
plt.ylim(18, 12.5)
plt.xlabel('u-r')
plt.ylabel('rPetrosian')
#------------------------------------------------------------
# plot a histogram of the redshift
plt.figure()
hist(data['z'], bins='knuth',
histtype='stepfilled', ec='k', fc='#F5CCB0')
plt.xlim(0, 0.4)
plt.xlabel('z (redshift)')
plt.ylabel('dN/dz(z)')
plt.show()
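#------------------------------------------------------------
# A small sketch (not part of the original example) factoring out the
# coordinate conversion used above, in case it is reused elsewhere:
def _radec_to_mollweide(ra_deg, dec_deg):
    """Shift RA from [0, 360) to [-180, 180) and convert both angles to
    radians, as expected by matplotlib's 'mollweide' projection."""
    ra = (np.asarray(ra_deg) - 180) * np.pi / 180
    dec = np.asarray(dec_deg) * np.pi / 180
    return ra, dec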
| bsd-2-clause |
widdowquinn/Teaching-2015-03-17-UoD_compgenvis | workshop_1/bs32010/ex02.py | 2 | 2123 | # ex02.py
#
# Functions and data useful in exercise 2 (k-mer spectra) of
# the BS32010 course at the University of Dundee
import pandas as pd
from collections import defaultdict
import os
bact_datadir = "genome_data/gc_content"
files = {"Mycoplasma genitalium": ("NC_018495.fna",
"NC_018496.fna",
"NC_018497.fna",
"NC_018498.fna"),
"Mycoplasma pneumoniae": ("NC_000912.fna",
"NC_016807.fna",
"NC_017504.fna",
"NC_020076.fna"),
"Nostoc punctiforme": ("NC_010628.fna",),
"Escherichia coli": ("NC_000913.fna",
"NC_002695.fna",
"NC_004431.fna",
"NC_010468.fna"),
"Mycobacterium tuberculosis": ("NC_016934.fna",
"NC_017523.fna",
"NC_022350.fna",
"NC_000962.fna")}
bacteria = files.keys()
bact_files = {}
for k, v in files.items():
bact_files[k] = tuple([os.path.join(bact_datadir, fn) for fn in v])
def count_str_kmers(instr, k, kdict=None):
"""Counts sequences of size k in instr, populating kdict.
Loops over instr with a window of size k, populating the
dictionary kdict with a count of occurrences of each k-mer.
Returns the dictionary kdict.
"""
if kdict is None:
kdict = defaultdict(int)
    # slide a window of size k over the string; there are len(instr) - k + 1
    # windows, so include the final one
    for idx in range(len(instr) - k + 1):
kdict[instr[idx:idx+k]] += 1
return kdict
def count_seq_kmers(inseq, k):
"""Counts kmers of size k in the sequence inseq.
Counts kmers in forward and reverse directions, returning
a Pandas dataframe of k-mer and count.
"""
kdict = count_str_kmers(str(inseq.seq), k)
kdict = count_str_kmers(str(inseq.reverse_complement().seq), k, kdict)
df = pd.DataFrame.from_dict(kdict, orient="index")
df.columns = ("frequency",)
return df
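# A minimal usage sketch (assumes Biopython is installed; the sequence below
# is made up purely for illustration):
if __name__ == '__main__':
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    record = SeqRecord(Seq("ACGTACGTAC"), id="demo")
    counts = count_seq_kmers(record, 2)
    # counts is a pandas DataFrame indexed by 2-mer, with a 'frequency' column
    # summed over the forward and reverse-complement strands
    print(counts.head())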
| mit |
RNAer/qiita | qiita_db/metadata_template/prep_template.py | 1 | 19189 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from os.path import join
from time import strftime
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_db.exceptions import (QiitaDBColumnError, QiitaDBUnknownIDError,
QiitaDBError, QiitaDBExecutionError)
from qiita_db.sql_connection import SQLConnectionHandler
from qiita_db.ontology import Ontology
from qiita_db.util import (get_emp_status, convert_to_id,
convert_from_id, get_mountpoint, infer_status)
from .base_metadata_template import BaseSample, MetadataTemplate
from .util import load_template_to_dataframe
from .constants import (TARGET_GENE_DATA_TYPES, RENAME_COLS_DICT,
REQUIRED_TARGET_GENE_COLS)
class PrepSample(BaseSample):
r"""Class that models a sample present in a PrepTemplate.
See Also
--------
BaseSample
Sample
"""
_table = "common_prep_info"
_table_prefix = "prep_"
_column_table = "prep_columns"
_id_column = "prep_template_id"
def _check_template_class(self, md_template):
r"""Checks that md_template is of the correct type
Parameters
----------
md_template : PrepTemplate
The metadata template
Raises
------
IncompetentQiitaDeveloperError
If `md_template` is not a PrepTemplate object
"""
if not isinstance(md_template, PrepTemplate):
raise IncompetentQiitaDeveloperError()
class PrepTemplate(MetadataTemplate):
r"""Represent the PrepTemplate of a raw data. Provides access to the
tables in the DB that holds the sample preparation information.
See Also
--------
MetadataTemplate
SampleTemplate
"""
_table = "common_prep_info"
_table_prefix = "prep_"
_column_table = "prep_columns"
_id_column = "prep_template_id"
translate_cols_dict = {'emp_status_id': 'emp_status'}
id_cols_handlers = {'emp_status_id': get_emp_status()}
str_cols_handlers = {'emp_status_id': get_emp_status(key='emp_status_id')}
_sample_cls = PrepSample
@classmethod
def create(cls, md_template, raw_data, study, data_type,
investigation_type=None):
r"""Creates the metadata template in the database
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by samples Ids
raw_data : RawData
The raw_data to which the prep template belongs to.
study : Study
The study to which the prep template belongs to.
data_type : str or int
The data_type of the prep template
investigation_type : str, optional
The investigation type, if relevant
Returns
-------
A new instance of `cls` to access to the PrepTemplate stored in the DB
Raises
------
QiitaDBColumnError
If the investigation_type is not valid
If a required column is missing in md_template
"""
# If the investigation_type is supplied, make sure it is one of
# the recognized investigation types
if investigation_type is not None:
cls.validate_investigation_type(investigation_type)
# Get a connection handler
conn_handler = SQLConnectionHandler()
queue_name = "CREATE_PREP_TEMPLATE_%d" % raw_data.id
conn_handler.create_queue(queue_name)
# Check if the data_type is the id or the string
if isinstance(data_type, (int, long)):
data_type_id = data_type
data_type_str = convert_from_id(data_type, "data_type",
conn_handler)
else:
data_type_id = convert_to_id(data_type, "data_type", conn_handler)
data_type_str = data_type
md_template = cls._clean_validate_template(md_template, study.id,
data_type_str, conn_handler)
# Insert the metadata template
# We need the prep_id for multiple calls below, which currently is not
# supported by the queue system. Thus, executing this outside the queue
prep_id = conn_handler.execute_fetchone(
"INSERT INTO qiita.prep_template (data_type_id, raw_data_id, "
"investigation_type) VALUES (%s, %s, %s) RETURNING "
"prep_template_id", (data_type_id, raw_data.id,
investigation_type))[0]
cls._add_common_creation_steps_to_queue(md_template, prep_id,
conn_handler, queue_name)
try:
conn_handler.execute_queue(queue_name)
except Exception:
# Clean up row from qiita.prep_template
conn_handler.execute(
"DELETE FROM qiita.prep_template where "
"{0} = %s".format(cls._id_column), (prep_id,))
# Check if sample IDs present here but not in sample template
sql = ("SELECT sample_id from qiita.required_sample_info WHERE "
"study_id = %s")
# Get list of study sample IDs, prep template study IDs,
# and their intersection
prep_samples = set(md_template.index.values)
unknown_samples = prep_samples.difference(
s[0] for s in conn_handler.execute_fetchall(sql, [study.id]))
if unknown_samples:
raise QiitaDBExecutionError(
'Samples found in prep template but not sample template: '
'%s' % ', '.join(unknown_samples))
# some other error we haven't seen before so raise it
raise
pt = cls(prep_id)
pt.generate_files()
return pt
@classmethod
    def validate_investigation_type(cls, investigation_type):
"""Simple investigation validation to avoid code duplication
Parameters
----------
investigation_type : str
The investigation type, should be part of the ENA ontology
Raises
-------
QiitaDBColumnError
The investigation type is not in the ENA ontology
"""
ontology = Ontology(convert_to_id('ENA', 'ontology'))
terms = ontology.terms + ontology.user_defined_terms
if investigation_type not in terms:
raise QiitaDBColumnError("'%s' is Not a valid investigation_type. "
"Choose from: %s" % (investigation_type,
', '.join(terms)))
@classmethod
def _check_template_special_columns(cls, md_template, data_type):
r"""Checks for special columns based on obj type
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
data_type : str
The data_type of the template.
Returns
-------
set
The set of missing columns
Notes
-----
Sometimes people use different names for the same columns. We just
rename them to use the naming that we expect, so this is normalized
across studies.
"""
# We only have column requirements if the data type of the raw data
# is one of the target gene types
missing_cols = set()
if data_type in TARGET_GENE_DATA_TYPES:
md_template.rename(columns=RENAME_COLS_DICT, inplace=True)
# Check for all required columns for target genes studies
missing_cols = REQUIRED_TARGET_GENE_COLS.difference(
md_template.columns)
return missing_cols
@classmethod
def delete(cls, id_):
r"""Deletes the table from the database
Parameters
----------
id_ : obj
The object identifier
Raises
------
QiitaDBExecutionError
If the prep template already has a preprocessed data
QiitaDBUnknownIDError
If no prep template with id = id_ exists
"""
table_name = cls._table_name(id_)
conn_handler = SQLConnectionHandler()
if not cls.exists(id_):
raise QiitaDBUnknownIDError(id_, cls.__name__)
preprocessed_data_exists = conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.prep_template_preprocessed_data"
" WHERE prep_template_id=%s)", (id_,))[0]
if preprocessed_data_exists:
raise QiitaDBExecutionError("Cannot remove prep template %d "
"because a preprocessed data has been"
" already generated using it." % id_)
# Delete the prep template filepaths
conn_handler.execute(
"DELETE FROM qiita.prep_template_filepath WHERE "
"prep_template_id = %s", (id_, ))
# Drop the prep_X table
conn_handler.execute(
"DROP TABLE qiita.{0}".format(table_name))
# Remove the rows from common_prep_info
conn_handler.execute(
"DELETE FROM qiita.{0} where {1} = %s".format(cls._table,
cls._id_column),
(id_,))
# Remove the rows from prep_columns
conn_handler.execute(
"DELETE FROM qiita.{0} where {1} = %s".format(cls._column_table,
cls._id_column),
(id_,))
# Remove the row from prep_template
conn_handler.execute(
"DELETE FROM qiita.prep_template where "
"{0} = %s".format(cls._id_column), (id_,))
def data_type(self, ret_id=False):
"""Returns the data_type or the data_type id
Parameters
----------
ret_id : bool, optional
If true, return the id instead of the string, default false.
Returns
-------
str or int
string value of data_type or data_type_id if ret_id is True
"""
ret = "_id" if ret_id else ""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT d.data_type{0} FROM qiita.data_type d JOIN "
"qiita.prep_template p ON p.data_type_id = d.data_type_id WHERE "
"p.prep_template_id=%s".format(ret), (self.id,))[0]
@property
def raw_data(self):
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT raw_data_id FROM qiita.prep_template "
"WHERE prep_template_id=%s", (self.id,))[0]
@property
def preprocessed_data(self):
conn_handler = SQLConnectionHandler()
prep_datas = conn_handler.execute_fetchall(
"SELECT preprocessed_data_id FROM "
"qiita.prep_template_preprocessed_data WHERE prep_template_id=%s",
(self.id,))
return [x[0] for x in prep_datas]
@property
def preprocessing_status(self):
r"""Tells if the data has been preprocessed or not
Returns
-------
str
One of {'not_preprocessed', 'preprocessing', 'success', 'failed'}
"""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT preprocessing_status FROM qiita.prep_template "
"WHERE {0}=%s".format(self._id_column), (self.id,))[0]
@preprocessing_status.setter
def preprocessing_status(self, state):
r"""Update the preprocessing status
Parameters
----------
state : str, {'not_preprocessed', 'preprocessing', 'success', 'failed'}
The current status of preprocessing
Raises
------
ValueError
If the state is not known.
"""
if (state not in ('not_preprocessed', 'preprocessing', 'success') and
not state.startswith('failed:')):
raise ValueError('Unknown state: %s' % state)
conn_handler = SQLConnectionHandler()
conn_handler.execute(
"UPDATE qiita.prep_template SET preprocessing_status = %s "
"WHERE {0} = %s".format(self._id_column),
(state, self.id))
@property
def investigation_type(self):
conn_handler = SQLConnectionHandler()
sql = ("SELECT investigation_type FROM qiita.prep_template "
"WHERE {0} = %s".format(self._id_column))
return conn_handler.execute_fetchone(sql, [self._id])[0]
@investigation_type.setter
def investigation_type(self, investigation_type):
r"""Update the investigation type
Parameters
----------
investigation_type : str
The investigation type to set, should be part of the ENA ontology
Raises
------
QiitaDBColumnError
If the investigation type is not a valid ENA ontology
"""
if investigation_type is not None:
self.validate_investigation_type(investigation_type)
conn_handler = SQLConnectionHandler()
conn_handler.execute(
"UPDATE qiita.prep_template SET investigation_type = %s "
"WHERE {0} = %s".format(self._id_column),
(investigation_type, self.id))
@property
def study_id(self):
"""Gets the study id with which this prep template is associated
Returns
-------
int
The ID of the study with which this prep template is associated
"""
conn = SQLConnectionHandler()
sql = ("SELECT srd.study_id FROM qiita.prep_template pt JOIN "
"qiita.study_raw_data srd ON pt.raw_data_id = srd.raw_data_id "
"WHERE prep_template_id = %d" % self.id)
study_id = conn.execute_fetchone(sql)
if study_id:
return study_id[0]
else:
raise QiitaDBError("No studies found associated with prep "
"template ID %d" % self._id)
def generate_files(self):
r"""Generates all the files that contain data from this template
"""
# figuring out the filepath of the prep template
_id, fp = get_mountpoint('templates')[0]
fp = join(fp, '%d_prep_%d_%s.txt' % (self.study_id, self._id,
strftime("%Y%m%d-%H%M%S")))
# storing the template
self.to_file(fp)
# adding the fp to the object
self.add_filepath(fp)
# creating QIIME mapping file
self.create_qiime_mapping_file(fp)
def create_qiime_mapping_file(self, prep_template_fp):
"""This creates the QIIME mapping file and links it in the db.
Parameters
----------
prep_template_fp : str
            The prep template filepath that should be joined with the sample
            template and used to generate a new QIIME mapping file
Returns
-------
filepath : str
The filepath of the created QIIME mapping file
Raises
------
ValueError
If the prep template is not a subset of the sample template
"""
rename_cols = {
'barcode': 'BarcodeSequence',
'barcodesequence': 'BarcodeSequence',
'primer': 'LinkerPrimerSequence',
'linkerprimersequence': 'LinkerPrimerSequence',
'description': 'Description',
}
# getting the latest sample template
conn_handler = SQLConnectionHandler()
sql = """SELECT filepath_id, filepath
FROM qiita.filepath
JOIN qiita.sample_template_filepath
USING (filepath_id)
WHERE study_id=%s
ORDER BY filepath_id DESC"""
sample_template_fname = conn_handler.execute_fetchall(
sql, (self.study_id,))[0][1]
_, fp = get_mountpoint('templates')[0]
sample_template_fp = join(fp, sample_template_fname)
# reading files via pandas
st = load_template_to_dataframe(sample_template_fp)
pt = load_template_to_dataframe(prep_template_fp)
st_sample_names = set(st.index)
pt_sample_names = set(pt.index)
if not pt_sample_names.issubset(st_sample_names):
raise ValueError(
"Prep template is not a sub set of the sample template, files:"
"%s %s - samples: %s" % (sample_template_fp, prep_template_fp,
str(pt_sample_names-st_sample_names)))
mapping = pt.join(st, lsuffix="_prep")
mapping.rename(columns=rename_cols, inplace=True)
        # Get the original mapping columns and readjust the order to comply
# with QIIME requirements
cols = mapping.columns.values.tolist()
cols.remove('BarcodeSequence')
cols.remove('LinkerPrimerSequence')
cols.remove('Description')
new_cols = ['BarcodeSequence', 'LinkerPrimerSequence']
new_cols.extend(cols)
new_cols.append('Description')
mapping = mapping[new_cols]
# figuring out the filepath for the QIIME map file
_id, fp = get_mountpoint('templates')[0]
filepath = join(fp, '%d_prep_%d_qiime_%s.txt' % (self.study_id,
self.id, strftime("%Y%m%d-%H%M%S")))
# Save the mapping file
mapping.to_csv(filepath, index_label='#SampleID', na_rep='unknown',
sep='\t')
# adding the fp to the object
self.add_filepath(filepath)
return filepath
@property
def status(self):
"""The status of the prep template
Returns
-------
str
The status of the prep template
Notes
-----
The status of a prep template is inferred by the status of the
processed data generated from this prep template. If no processed
data has been generated with this prep template; then the status
is 'sandbox'.
"""
conn_handler = SQLConnectionHandler()
sql = """SELECT processed_data_status
FROM qiita.processed_data_status pds
JOIN qiita.processed_data pd
USING (processed_data_status_id)
JOIN qiita.preprocessed_processed_data ppd_pd
USING (processed_data_id)
JOIN qiita.prep_template_preprocessed_data pt_ppd
USING (preprocessed_data_id)
WHERE pt_ppd.prep_template_id=%s"""
pd_statuses = conn_handler.execute_fetchall(sql, (self._id,))
return infer_status(pd_statuses)
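# ----------------------------------------------------------------------------
# Illustration only (not part of qiita_db): the column reordering performed in
# `create_qiime_mapping_file` above, shown on a toy pandas DataFrame. Column
# values and the sample name here are hypothetical.
if __name__ == '__main__':
    import pandas as pd
    toy = pd.DataFrame(
        {'Description': ['sample 1 description'], 'BarcodeSequence': ['ACGT'],
         'LinkerPrimerSequence': ['GTGCCAGC'], 'ph': [7.1]},
        index=['sample.1'])
    cols = toy.columns.values.tolist()
    for required in ('BarcodeSequence', 'LinkerPrimerSequence', 'Description'):
        cols.remove(required)
    # QIIME expects barcode/primer first and Description last
    ordered = ['BarcodeSequence', 'LinkerPrimerSequence'] + cols + ['Description']
    print(toy[ordered].columns.tolist())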
| bsd-3-clause |
saketkc/statsmodels | statsmodels/base/model.py | 25 | 76781 | from __future__ import print_function
from statsmodels.compat.python import iterkeys, lzip, range, reduce
import numpy as np
from scipy import stats
from statsmodels.base.data import handle_data
from statsmodels.tools.tools import recipr, nan_dot
from statsmodels.stats.contrast import ContrastResults, WaldTestResults
from statsmodels.tools.decorators import resettable_cache, cache_readonly
import statsmodels.base.wrapper as wrap
from statsmodels.tools.numdiff import approx_fprime
from statsmodels.formula import handle_formula_data
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.base.optimizer import Optimizer
_model_params_doc = """
Parameters
----------
endog : array-like
1-d endogenous response variable. The dependent variable.
exog : array-like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user. See
:func:`statsmodels.tools.add_constant`."""
_missing_param_doc = """\
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
    If 'raise', an error is raised. Default is 'none'."""
_extra_param_doc = """
hasconst : None or bool
Indicates whether the RHS includes a user-supplied constant. If True,
a constant is not checked for and k_constant is set to 1 and all
result statistics are calculated as if a constant is present. If
False, a constant is not checked for and k_constant is set to 0.
"""
class Model(object):
__doc__ = """
A (predictive) statistical model. Intended to be subclassed not used.
%(params_doc)s
%(extra_params_doc)s
Notes
-----
`endog` and `exog` are references to any data provided. So if the data is
already stored in numpy arrays and it is changed then `endog` and `exog`
will change as well.
""" % {'params_doc' : _model_params_doc,
'extra_params_doc' : _missing_param_doc + _extra_param_doc}
def __init__(self, endog, exog=None, **kwargs):
missing = kwargs.pop('missing', 'none')
hasconst = kwargs.pop('hasconst', None)
self.data = self._handle_data(endog, exog, missing, hasconst,
**kwargs)
self.k_constant = self.data.k_constant
self.exog = self.data.exog
self.endog = self.data.endog
self._data_attr = []
self._data_attr.extend(['exog', 'endog', 'data.exog', 'data.endog'])
if 'formula' not in kwargs: # won't be able to unpickle without these
self._data_attr.extend(['data.orig_endog', 'data.orig_exog'])
# store keys for extras if we need to recreate model instance
# we don't need 'missing', maybe we need 'hasconst'
self._init_keys = list(kwargs.keys())
if hasconst is not None:
self._init_keys.append('hasconst')
def _get_init_kwds(self):
"""return dictionary with extra keys used in model.__init__
"""
kwds = dict(((key, getattr(self, key, None))
for key in self._init_keys))
return kwds
def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
data = handle_data(endog, exog, missing, hasconst, **kwargs)
# kwargs arrays could have changed, easier to just attach here
for key in kwargs:
if key in ['design_info', 'formula']: # leave attached to data
continue
# pop so we don't start keeping all these twice or references
try:
setattr(self, key, data.__dict__.pop(key))
except KeyError: # panel already pops keys in data handling
pass
return data
@classmethod
def from_formula(cls, formula, data, subset=None, *args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
subset : array-like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
------
        data must define __getitem__ with the keys in the formula terms, e.g.,
        a numpy structured or rec array, a dictionary, or a pandas DataFrame.
        args and kwargs are passed on to the model instantiation.
"""
#TODO: provide a docs template for args/kwargs from child models
#TODO: subset could use syntax. issue #469.
if subset is not None:
data = data.ix[subset]
eval_env = kwargs.pop('eval_env', None)
if eval_env is None:
eval_env = 2
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
else:
eval_env += 1 # we're going down the stack again
missing = kwargs.get('missing', 'drop')
        if missing == 'none':  # with patsy it's drop or raise. let's raise.
missing = 'raise'
tmp = handle_formula_data(data, None, formula, depth=eval_env,
missing=missing)
((endog, exog), missing_idx, design_info) = tmp
kwargs.update({'missing_idx': missing_idx,
'missing': missing,
                       'formula': formula,  # attach formula for unpickling
'design_info': design_info})
mod = cls(endog, exog, *args, **kwargs)
mod.formula = formula
# since we got a dataframe, attach the original
mod.data.frame = data
return mod
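    # A minimal sketch of the formula interface documented above, for a
    # concrete subclass (hypothetical DataFrame `df` with columns y, x1, x2):
    #
    #     >>> import statsmodels.api as sm
    #     >>> mod = sm.OLS.from_formula('y ~ x1 + np.log(x2)', data=df)
    #     >>> res = mod.fit()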
@property
def endog_names(self):
return self.data.ynames
@property
def exog_names(self):
return self.data.xnames
def fit(self):
"""
Fit a model to data.
"""
raise NotImplementedError
def predict(self, params, exog=None, *args, **kwargs):
"""
After a model has been fit predict returns the fitted values.
This is a placeholder intended to be overwritten by individual models.
"""
raise NotImplementedError
class LikelihoodModel(Model):
"""
Likelihood model is a subclass of Model.
"""
def __init__(self, endog, exog=None, **kwargs):
super(LikelihoodModel, self).__init__(endog, exog, **kwargs)
self.initialize()
def initialize(self):
"""
Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed.
"""
pass
# TODO: if the intent is to re-initialize the model with new data then this
# method needs to take inputs...
def loglike(self, params):
"""
Log-likelihood of model.
"""
raise NotImplementedError
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
"""
raise NotImplementedError
def information(self, params):
"""
Fisher information matrix of model
Returns -Hessian of loglike evaluated at params.
"""
raise NotImplementedError
def hessian(self, params):
"""
The Hessian matrix of the model
"""
raise NotImplementedError
def fit(self, start_params=None, method='newton', maxiter=100,
full_output=True, disp=True, fargs=(), callback=None, retall=False,
skip_hessian=False, **kwargs):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
fargs : tuple, optional
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool, optional
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
skip_hessian : bool, optional
If False (default), then the negative inverse hessian is calculated
after the optimization. If True, then the hessian will not be
calculated. However, it will be available in methods that use the
hessian in the optimization (currently only with `"newton"`).
kwargs : keywords
All kwargs are passed to the chosen solver with one exception. The
following keyword controls what happens after the fit::
warn_convergence : bool, optional
If True, checks the model for the converged flag. If the
converged flag is False, a ConvergenceWarning is issued.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for solvers (see returned Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
This many terms are used for the Hessian approximation.
factr : float
A stop condition that is a variant of relative error.
pgtol : float
A stop condition that uses the projected gradient.
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
maxfun : int
Maximum number of function evaluations to make.
bounds : sequence
(min, max) pairs for each element in x,
defining the bounds on that parameter.
Use None for one of min or max when there is no bound
in that direction.
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
                    Relative error in loglike(params) acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
"""
Hinv = None # JP error if full_output=0, Hinv not defined
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
elif self.exog is not None:
# fails for shape (K,)?
start_params = [0] * self.exog.shape[1]
else:
raise ValueError("If exog is None, then start_params should "
"be specified")
# TODO: separate args from nonarg taking score and hessian, ie.,
        # user-supplied and numerically evaluated estimate; fprime doesn't take
# args in most (any?) of the optimize function
nobs = self.endog.shape[0]
f = lambda params, *args: -self.loglike(params, *args) / nobs
score = lambda params, *args: -self.score(params, *args) / nobs
try:
hess = lambda params, *args: -self.hessian(params, *args) / nobs
except:
hess = None
if method == 'newton':
score = lambda params, *args: self.score(params, *args) / nobs
hess = lambda params, *args: self.hessian(params, *args) / nobs
#TODO: why are score and hess positive?
warn_convergence = kwargs.pop('warn_convergence', True)
optimizer = Optimizer()
xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
fargs, kwargs,
hessian=hess,
method=method,
disp=disp,
maxiter=maxiter,
callback=callback,
retall=retall,
full_output=full_output)
#NOTE: this is for fit_regularized and should be generalized
cov_params_func = kwargs.setdefault('cov_params_func', None)
if cov_params_func:
Hinv = cov_params_func(self, xopt, retvals)
elif method == 'newton' and full_output:
Hinv = np.linalg.inv(-retvals['Hessian']) / nobs
elif not skip_hessian:
try:
Hinv = np.linalg.inv(-1 * self.hessian(xopt))
except:
#might want custom warning ResultsWarning? NumericalWarning?
from warnings import warn
warndoc = ('Inverting hessian failed, no bse or '
'cov_params available')
warn(warndoc, RuntimeWarning)
Hinv = None
if 'cov_type' in kwargs:
cov_kwds = kwargs.get('cov_kwds', {})
kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}
else:
kwds = {}
if 'use_t' in kwargs:
kwds['use_t'] = kwargs['use_t']
#prints for debugging
#print('kwargs inLikelihoodModel.fit', kwargs)
#print('kwds inLikelihoodModel.fit', kwds)
#TODO: add Hessian approximation and change the above if needed
mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1., **kwds)
#TODO: hardcode scale?
if isinstance(retvals, dict):
mlefit.mle_retvals = retvals
if warn_convergence and not retvals['converged']:
from warnings import warn
from statsmodels.tools.sm_exceptions import ConvergenceWarning
warn("Maximum Likelihood optimization failed to converge. "
"Check mle_retvals", ConvergenceWarning)
mlefit.mle_settings = optim_settings
return mlefit
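    # A hedged usage sketch of `fit` for a concrete subclass (discrete models
    # follow this pattern; `y` and `X` below stand in for user data):
    #
    #     >>> mod = sm.Logit(y, X)
    #     >>> res = mod.fit(method='bfgs', maxiter=200, disp=False)
    #     >>> res.mle_retvals['converged']    # check convergence before use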
#TODO: the below is unfinished
class GenericLikelihoodModel(LikelihoodModel):
"""
Allows the fitting of any likelihood function via maximum likelihood.
A subclass needs to specify at least the log-likelihood
If the log-likelihood is specified for each observation, then results that
require the Jacobian will be available. (The other case is not tested yet.)
Notes
-----
Optimization methods that require only a likelihood function are 'nm' and
'powell'
Optimization methods that require a likelihood function and a
score/gradient are 'bfgs', 'cg', and 'ncg'. A function to compute the
Hessian is optional for 'ncg'.
Optimization method that require a likelihood function, a score/gradient,
and a Hessian is 'newton'
If they are not overwritten by a subclass, then numerical gradient,
    Jacobian and Hessian of the log-likelihood are calculated by numerical
    forward differentiation. In some cases this might result in precision
    problems, and the Hessian might not be positive definite. Even if the
Hessian is not positive definite the covariance matrix of the parameter
estimates based on the outer product of the Jacobian might still be valid.
Examples
--------
see also subclasses in directory miscmodels
import statsmodels.api as sm
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog)
# in this dir
from model import GenericLikelihoodModel
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
import numpy as np
np.allclose(res.params, probit_res.params)
"""
def __init__(self, endog, exog=None, loglike=None, score=None,
hessian=None, missing='none', extra_params_names=None,
**kwds):
# let them be none in case user wants to use inheritance
        if loglike is not None:
            self.loglike = loglike
        if score is not None:
            self.score = score
        if hessian is not None:
            self.hessian = hessian
self.__dict__.update(kwds)
# TODO: data structures?
#TODO temporary solution, force approx normal
#self.df_model = 9999
#somewhere: CacheWriteWarning: 'df_model' cannot be overwritten
super(GenericLikelihoodModel, self).__init__(endog, exog,
missing=missing)
# this won't work for ru2nmnl, maybe np.ndim of a dict?
if exog is not None:
#try:
self.nparams = (exog.shape[1] if np.ndim(exog) == 2 else 1)
if extra_params_names is not None:
self._set_extra_params_names(extra_params_names)
def _set_extra_params_names(self, extra_params_names):
# check param_names
if extra_params_names is not None:
if self.exog is not None:
self.exog_names.extend(extra_params_names)
else:
self.data.xnames = extra_params_names
self.nparams = len(self.exog_names)
#this is redundant and not used when subclassing
def initialize(self):
if not self.score: # right now score is not optional
self.score = approx_fprime
if not self.hessian:
pass
else: # can use approx_hess_p if we have a gradient
if not self.hessian:
pass
#Initialize is called by
#statsmodels.model.LikelihoodModel.__init__
#and should contain any preprocessing that needs to be done for a model
from statsmodels.tools import tools
if self.exog is not None:
# assume constant
self.df_model = float(np_matrix_rank(self.exog) - 1)
self.df_resid = (float(self.exog.shape[0] -
np_matrix_rank(self.exog)))
else:
self.df_model = np.nan
self.df_resid = np.nan
super(GenericLikelihoodModel, self).initialize()
def expandparams(self, params):
'''
expand to full parameter array when some parameters are fixed
Parameters
----------
params : array
reduced parameter array
Returns
-------
paramsfull : array
expanded parameter array where fixed parameters are included
Notes
-----
Calling this requires that self.fixed_params and self.fixed_paramsmask
are defined.
*developer notes:*
This can be used in the log-likelihood to ...
this could also be replaced by a more general parameter
transformation.
'''
paramsfull = self.fixed_params.copy()
paramsfull[self.fixed_paramsmask] = params
return paramsfull
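    # Worked example for expandparams/reduceparams (illustrative values only):
    # with self.fixed_params = array([a, b, c]) and
    # self.fixed_paramsmask = array([True, False, True]),
    # expandparams([p0, p1]) returns array([p0, b, p1]), and
    # reduceparams(array([p0, b, p1])) returns array([p0, p1]).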
def reduceparams(self, params):
return params[self.fixed_paramsmask]
def loglike(self, params):
return self.loglikeobs(params).sum(0)
def nloglike(self, params):
return -self.loglikeobs(params).sum(0)
def loglikeobs(self, params):
return -self.nloglikeobs(params)
def score(self, params):
'''
Gradient of log-likelihood evaluated at params
'''
kwds = {}
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglike, **kwds).ravel()
def score_obs(self, params, **kwds):
'''
Jacobian/Gradient of log-likelihood evaluated at params for each
observation.
'''
#kwds.setdefault('epsilon', 1e-4)
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglikeobs, **kwds)
jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
" jac will be removed in 0.7.")
def hessian(self, params):
'''
Hessian of log-likelihood evaluated at params
'''
from statsmodels.tools.numdiff import approx_hess
# need options for hess (epsilon)
return approx_hess(params, self.loglike)
def fit(self, start_params=None, method='nm', maxiter=500, full_output=1,
disp=1, callback=None, retall=0, **kwargs):
"""
Fit the model using maximum likelihood.
The rest of the docstring is from
statsmodels.LikelihoodModel.fit
"""
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
else:
start_params = 0.1 * np.ones(self.nparams)
fit_method = super(GenericLikelihoodModel, self).fit
mlefit = fit_method(start_params=start_params,
method=method, maxiter=maxiter,
full_output=full_output,
disp=disp, callback=callback, **kwargs)
genericmlefit = GenericLikelihoodModelResults(self, mlefit)
#amend param names
exog_names = [] if (self.exog_names is None) else self.exog_names
k_miss = len(exog_names) - len(mlefit.params)
if not k_miss == 0:
if k_miss < 0:
self._set_extra_params_names(
['par%d' % i for i in range(-k_miss)])
else:
# I don't want to raise after we have already fit()
import warnings
warnings.warn('more exog_names than parameters', UserWarning)
return genericmlefit
#fit.__doc__ += LikelihoodModel.fit.__doc__
class Results(object):
"""
Class to contain model results
Parameters
----------
model : class instance
the previously specified model instance
params : array
parameter estimates from the fit model
"""
def __init__(self, model, params, **kwd):
self.__dict__.update(kwd)
self.initialize(model, params, **kwd)
self._data_attr = []
def initialize(self, model, params, **kwd):
self.params = params
self.model = model
if hasattr(model, 'k_constant'):
self.k_constant = model.k_constant
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray or pandas.Series
See self.model.predict
"""
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
return self.model.predict(self.params, exog, *args, **kwargs)
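    # A minimal sketch of the formula-aware prediction described above,
    # for a hypothetical result `res` fitted via ols('y ~ np.log(x1)', df):
    #
    #     >>> new = pd.DataFrame({'x1': [1.0, 2.0, 3.0]})
    #     >>> res.predict(new)    # the log transform is applied via the formula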
#TODO: public method?
class LikelihoodModelResults(Results):
"""
Class to contain results from likelihood models
Parameters
-----------
model : LikelihoodModel instance or subclass instance
LikelihoodModelResults holds a reference to the model that is fit.
params : 1d array_like
parameter estimates from estimated model
normalized_cov_params : 2d array
Normalized (before scaling) covariance of params. (dot(X.T,X))**-1
scale : float
For (some subset of models) scale will typically be the
mean square error from the estimated model (sigma^2)
Returns
-------
**Attributes**
mle_retvals : dict
Contains the values returned from the chosen optimization method if
full_output is True during the fit. Available only if the model
is fit by maximum likelihood. See notes below for the output from
the different methods.
mle_settings : dict
Contains the arguments passed to the chosen optimization method.
Available if the model is fit by maximum likelihood. See
LikelihoodModel.fit for more information.
model : model instance
LikelihoodResults contains a reference to the model that is fit.
params : ndarray
The parameters estimated for the model.
scale : float
The scaling factor of the model given during instantiation.
tvalues : array
The t-values of the standard errors.
Notes
-----
The covariance of params is given by scale times normalized_cov_params.
Return values by solver if full_output is True during fit:
'newton'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
score : ndarray
The score vector at the optimum.
Hessian : ndarray
The Hessian at the optimum.
warnflag : int
1 if maxiter is exceeded. 0 if successful convergence.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'nm'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
warnflag : int
1: Maximum number of function evaluations made.
2: Maximum number of iterations reached.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'bfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
Hinv : ndarray
value of the inverse Hessian matrix at minimum. Note
that this is just an approximation and will often be
different from the value of the analytic Hessian.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient
and/or function calls are not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'lbfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
fcalls : int
Number of calls to loglike.
warnflag : int
Warning flag:
- 0 if converged
- 1 if too many function evaluations or too many iterations
- 2 if stopped for another reason
converged : bool
True: converged. False: did not converge.
'powell'
fopt : float
Value of the (negative) loglikelihood at its minimum.
direc : ndarray
Current direction set.
iterations : int
Number of iterations performed.
fcalls : int
Number of calls to loglike.
warnflag : int
1: Maximum number of function evaluations. 2: Maximum number
of iterations.
converged : bool
True : converged. False: did not converge.
allvecs : list
Results at each iteration.
'cg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient and/
or function calls not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'ncg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
hcalls : int
Number of calls to hessian.
warnflag : int
1: Maximum number of iterations exceeded.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
"""
# by default we use normal distribution
# can be overwritten by instances or subclasses
use_t = False
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
**kwargs):
super(LikelihoodModelResults, self).__init__(model, params)
self.normalized_cov_params = normalized_cov_params
self.scale = scale
# robust covariance
# We put cov_type in kwargs so subclasses can decide in fit whether to
# use this generic implementation
if 'use_t' in kwargs:
use_t = kwargs['use_t']
if use_t is not None:
self.use_t = use_t
if 'cov_type' in kwargs:
cov_type = kwargs.get('cov_type', 'nonrobust')
cov_kwds = kwargs.get('cov_kwds', {})
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
else:
from statsmodels.base.covtype import get_robustcov_results
if cov_kwds is None:
cov_kwds = {}
use_t = self.use_t
# TODO: we shouldn't need use_t in get_robustcov_results
get_robustcov_results(self, cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
def normalized_cov_params(self):
raise NotImplementedError
def _get_robustcov_results(self, cov_type='nonrobust', use_self=True,
use_t=None, **cov_kwds):
from statsmodels.base.covtype import get_robustcov_results
if cov_kwds is None:
cov_kwds = {}
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
else:
# TODO: we shouldn't need use_t in get_robustcov_results
get_robustcov_results(self, cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
@cache_readonly
def llf(self):
return self.model.loglike(self.params)
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def tvalues(self):
"""
Return the t-statistic for a given parameter estimate.
"""
return self.params / self.bse
@cache_readonly
def pvalues(self):
if self.use_t:
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
return stats.t.sf(np.abs(self.tvalues), df_resid)*2
else:
return stats.norm.sf(np.abs(self.tvalues))*2
def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
other=None):
"""
Returns the variance/covariance matrix.
The variance/covariance matrix can be of a linear contrast
of the estimates of params or all params multiplied by scale which
will usually be an estimate of sigma^2. Scale is assumed to be
a scalar.
Parameters
----------
r_matrix : array-like
Can be 1d, or 2d. Can be used alone or with other.
column : array-like, optional
Must be used on its own. Can be 0d or 1d see below.
scale : float, optional
Can be specified or not. Default is None, which means that
the scale argument is taken from the model.
other : array-like, optional
Can be used when r_matrix is specified.
Returns
-------
cov : ndarray
covariance matrix of the parameter estimates or of linear
combination of parameter estimates. See Notes.
Notes
-----
(The below are assumed to be in matrix notation.)
If no argument is specified returns the covariance matrix of a model
``(scale)*(X.T X)^(-1)``
If contrast is specified it pre and post-multiplies as follows
``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``
If contrast and other are specified returns
``(scale) * r_matrix (X.T X)^(-1) other.T``
If column is specified returns
``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d
OR
``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
"""
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
dot_fun = nan_dot
else:
dot_fun = np.dot
if (cov_p is None and self.normalized_cov_params is None and
not hasattr(self, 'cov_params_default')):
raise ValueError('need covariance of parameters for computing '
'(unnormalized) covariances')
if column is not None and (r_matrix is not None or other is not None):
raise ValueError('Column should be specified without other '
'arguments.')
if other is not None and r_matrix is None:
raise ValueError('other can only be specified with r_matrix')
if cov_p is None:
if hasattr(self, 'cov_params_default'):
cov_p = self.cov_params_default
else:
if scale is None:
scale = self.scale
cov_p = self.normalized_cov_params * scale
if column is not None:
column = np.asarray(column)
if column.shape == ():
return cov_p[column, column]
else:
#return cov_p[column][:, column]
return cov_p[column[:, None], column]
elif r_matrix is not None:
r_matrix = np.asarray(r_matrix)
if r_matrix.shape == ():
raise ValueError("r_matrix should be 1d or 2d")
if other is None:
other = r_matrix
else:
other = np.asarray(other)
tmp = dot_fun(r_matrix, dot_fun(cov_p, np.transpose(other)))
return tmp
else: # if r_matrix is None and column is None:
return cov_p
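    # Hedged examples of the three call patterns documented in `cov_params`,
    # for a fitted `results` instance with k = 3 parameters:
    #
    #     >>> results.cov_params()                      # full 3 x 3 matrix
    #     >>> results.cov_params(column=[1, 2])         # 2 x 2 sub-block
    #     >>> results.cov_params(r_matrix=[0, 1, -1])   # Var(b1 - b2)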
#TODO: make sure this works as needed for GLMs
def t_test(self, r_matrix, cov_p=None, scale=None,
use_t=None):
"""
        Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array-like, str, tuple
- array : If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q). If q is given,
can be either a scalar or a length p row vector.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> r = np.zeros_like(results.params)
>>> r[5:] = [1,-1]
>>> print(r)
[ 0. 0. 0. 0. 0. 1. -1.]
r tests that the coefficients on the 5th and 6th independent
variable are the same.
>>> T_test = results.t_test(r)
>>> print(T_test)
<T contrast: effect=-1829.2025687192481, sd=455.39079425193762,
t=-4.0167754636411717, p=0.0015163772380899498, df_denom=9>
>>> T_test.effect
-1829.2025687192481
>>> T_test.sd
455.39079425193762
>>> T_test.tvalue
-4.0167754636411717
>>> T_test.pvalue
0.0015163772380899498
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.formula.api import ols
>>> dta = sm.datasets.longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
>>> t_test = results.t_test(hypotheses)
>>> print(t_test)
See Also
---------
tvalues : individual t statistics
f_test : for F tests
patsy.DesignInfo.linear_constraint
"""
from patsy import DesignInfo
names = self.model.data.param_names
LC = DesignInfo(names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
num_ttests = r_matrix.shape[0]
num_params = r_matrix.shape[1]
if (cov_p is None and self.normalized_cov_params is None and
not hasattr(self, 'cov_params_default')):
raise ValueError('Need covariance of parameters for computing '
'T statistics')
if num_params != self.params.shape[0]:
raise ValueError('r_matrix and params are not aligned')
if q_matrix is None:
q_matrix = np.zeros(num_ttests)
else:
q_matrix = np.asarray(q_matrix)
q_matrix = q_matrix.squeeze()
if q_matrix.size > 1:
if q_matrix.shape[0] != num_ttests:
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
if use_t is None:
#switch to use_t false if undefined
use_t = (hasattr(self, 'use_t') and self.use_t)
_t = _sd = None
_effect = np.dot(r_matrix, self.params)
# nan_dot multiplies with the convention nan * 0 = 0
# Perform the test
if num_ttests > 1:
_sd = np.sqrt(np.diag(self.cov_params(
r_matrix=r_matrix, cov_p=cov_p)))
else:
_sd = np.sqrt(self.cov_params(r_matrix=r_matrix, cov_p=cov_p))
_t = (_effect - q_matrix) * recipr(_sd)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
if use_t:
return ContrastResults(effect=_effect, t=_t, sd=_sd,
df_denom=df_resid)
else:
return ContrastResults(effect=_effect, statistic=_t, sd=_sd,
df_denom=df_resid,
distribution='norm')
def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
"""
Compute the F-test for a joint linear hypothesis.
This is a special case of `wald_test` that always uses the F
distribution.
Parameters
----------
r_matrix : array-like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
            - tuple : A tuple of arrays in the form (R, q), where ``q`` can be
              either a scalar or a length k row vector.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
scale : float, optional
Default is 1.0 for no scaling.
invcov : array-like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> A = np.identity(len(results.params))
>>> A = A[1:,:]
This tests that each coefficient is jointly statistically
significantly different from zero.
>>> print(results.f_test(A))
<F contrast: F=330.28533923463488, p=4.98403052872e-10,
df_denom=9, df_num=6>
Compare this to
>>> results.fvalue
330.2853392346658
>>> results.f_pvalue
4.98403096572e-10
>>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))
This tests that the coefficient on the 2nd and 3rd regressors are
equal and jointly that the coefficient on the 5th and 6th regressors
are equal.
>>> print(results.f_test(B))
<F contrast: F=9.740461873303655, p=0.00560528853174, df_denom=9,
df_num=2>
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.datasets import longley
>>> from statsmodels.formula.api import ols
>>> dta = longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
>>> f_test = results.f_test(hypotheses)
>>> print(f_test)
See Also
--------
statsmodels.stats.contrast.ContrastResults
wald_test
t_test
patsy.DesignInfo.linear_constraint
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
"""
res = self.wald_test(r_matrix, cov_p=cov_p, scale=scale,
invcov=invcov, use_f=True)
return res
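    # Sketch relating f_test to wald_test (which it wraps with use_f=True),
    # assuming the same illustrative OLS fit `results` as in the docstring:
    #
    # >>> A = np.identity(len(results.params))[1:, :]
    # >>> print(results.f_test(A))
    # >>> print(results.wald_test(A, use_f=True))   # same F statistic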
#TODO: untested for GLMs?
def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,
use_f=None):
"""
Compute a Wald-test for a joint linear hypothesis.
Parameters
----------
r_matrix : array-like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed that the
linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
            - tuple : A tuple of arrays in the form (R, q), where ``q`` can be
              either a scalar or a length p row vector.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
scale : float, optional
Default is 1.0 for no scaling.
invcov : array-like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
use_f : bool
            If True, then the F-distribution is used. If False, then the
            asymptotic distribution, chi-square, is used. If use_f is None, then
            the F distribution is used if the model specifies that use_t is True.
The test statistic is proportionally adjusted for the distribution
by the number of constraints in the hypothesis.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
See also
--------
statsmodels.stats.contrast.ContrastResults
f_test
t_test
patsy.DesignInfo.linear_constraint
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
"""
if use_f is None:
            # default use_f to the model's use_t setting; False if undefined
use_f = (hasattr(self, 'use_t') and self.use_t)
from patsy import DesignInfo
names = self.model.data.param_names
LC = DesignInfo(names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
if (self.normalized_cov_params is None and cov_p is None and
invcov is None and not hasattr(self, 'cov_params_default')):
raise ValueError('need covariance of parameters for computing '
'F statistics')
cparams = np.dot(r_matrix, self.params[:, None])
J = float(r_matrix.shape[0]) # number of restrictions
if q_matrix is None:
q_matrix = np.zeros(J)
else:
q_matrix = np.asarray(q_matrix)
if q_matrix.ndim == 1:
q_matrix = q_matrix[:, None]
if q_matrix.shape[0] != J:
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
Rbq = cparams - q_matrix
if invcov is None:
cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)
if np.isnan(cov_p).max():
raise ValueError("r_matrix performs f_test for using "
"dimensions that are asymptotically "
"non-normal")
invcov = np.linalg.inv(cov_p)
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
F = nan_dot(nan_dot(Rbq.T, invcov), Rbq)
else:
F = np.dot(np.dot(Rbq.T, invcov), Rbq)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
if use_f:
F /= J
return ContrastResults(F=F, df_denom=df_resid,
df_num=invcov.shape[0])
else:
return ContrastResults(chi2=F, df_denom=J, statistic=F,
distribution='chi2', distargs=(J,))
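    # Sketch of the asymptotic variant, assuming the illustrative `results`
    # and restriction matrix A from above; with use_f=False the statistic is
    # J times the F statistic and is referred to a chi2(J) distribution:
    #
    # >>> print(results.wald_test(A, use_f=False))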
def wald_test_terms(self, skip_single=False, extra_constraints=None,
combine_terms=None):
"""
Compute a sequence of Wald tests for terms over multiple columns
This computes joined Wald tests for the hypothesis that all
coefficients corresponding to a `term` are zero.
`Terms` are defined by the underlying formula or by string matching.
Parameters
----------
skip_single : boolean
            If true, then terms that consist of only a single column and
            therefore refer to only a single parameter are skipped.
If false, then all terms are included.
extra_constraints : ndarray
not tested yet
combine_terms : None or list of strings
Each string in this list is matched to the name of the terms or
the name of the exogenous variables. All columns whose name
includes that string are combined in one joint test.
Returns
-------
test_result : result instance
The result instance contains `table` which is a pandas DataFrame
with the test results: test statistic, degrees of freedom and
pvalues.
Examples
--------
>>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
>>> res_ols.wald_test_terms()
<class 'statsmodels.stats.contrast.WaldTestResults'>
F P>F df constraint df denom
Intercept 279.754525 2.37985521351e-22 1 51
C(Duration, Sum) 5.367071 0.0245738436636 1 51
C(Weight, Sum) 12.432445 3.99943118767e-05 2 51
C(Duration, Sum):C(Weight, Sum) 0.176002 0.83912310946 2 51
>>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)",
data).fit(cov_type='HC0')
>>> wt = res_poi.wald_test_terms(skip_single=False,
combine_terms=['Duration', 'Weight'])
>>> print(wt)
chi2 P>chi2 df constraint
Intercept 15.695625 7.43960374424e-05 1
C(Weight) 16.132616 0.000313940174705 2
C(Duration) 1.009147 0.315107378931 1
C(Weight):C(Duration) 0.216694 0.897315972824 2
Duration 11.187849 0.010752286833 3
Weight 30.263368 4.32586407145e-06 4
"""
# lazy import
from collections import defaultdict
result = self
if extra_constraints is None:
extra_constraints = []
if combine_terms is None:
combine_terms = []
design_info = getattr(result.model.data.orig_exog, 'design_info', None)
if design_info is None and extra_constraints is None:
raise ValueError('no constraints, nothing to do')
identity = np.eye(len(result.params))
constraints = []
combined = defaultdict(list)
if design_info is not None:
for term in design_info.terms:
cols = design_info.slice(term)
name = term.name()
constraint_matrix = identity[cols]
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
k_constraint = constraint_matrix.shape[0]
if skip_single:
if k_constraint == 1:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname, np.vstack(combined[cname])))
else:
# check by exog/params names if there is no formula info
for col, name in enumerate(result.model.exog_names):
constraint_matrix = identity[col]
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
if skip_single:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname, np.vstack(combined[cname])))
use_t = result.use_t
distribution = ['chi2', 'F'][use_t]
res_wald = []
index = []
for name, constraint in constraints + combined_constraints + extra_constraints:
wt = result.wald_test(constraint)
row = [wt.statistic.item(), wt.pvalue, constraint.shape[0]]
if use_t:
row.append(wt.df_denom)
res_wald.append(row)
index.append(name)
        # distribution-neutral names
col_names = ['statistic', 'pvalue', 'df_constraint']
if use_t:
col_names.append('df_denom')
# TODO: maybe move DataFrame creation to results class
from pandas import DataFrame
table = DataFrame(res_wald, index=index, columns=col_names)
res = WaldTestResults(None, distribution, None, table=table)
# TODO: remove temp again, added for testing
res.temp = constraints + combined_constraints + extra_constraints
return res
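    # Minimal sketch for a formula-based fit; the formula and data name below
    # are hypothetical:
    #
    # >>> res = ols("y ~ C(group) * x", data).fit()
    # >>> print(res.wald_test_terms(skip_single=True))
    # # one joint test per multi-column term, e.g. C(group) and C(group):x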
def conf_int(self, alpha=.05, cols=None, method='default'):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
method : string
Not Implemented Yet
Method to estimate the confidence_interval.
"Default" : uses self.bse which is based on inverse Hessian for MLE
"hjjh" :
"jac" :
"boot-bse"
"boot_quant"
"profile"
Returns
        -------
conf_int : array
Each row contains [lower, upper] limits of the confidence interval
for the corresponding parameter. The first column contains all
lower, the second column contains all upper limits.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> results.conf_int()
array([[-5496529.48322745, -1467987.78596704],
[ -177.02903529, 207.15277984],
[ -0.1115811 , 0.03994274],
[ -3.12506664, -0.91539297],
[ -1.5179487 , -0.54850503],
[ -0.56251721, 0.460309 ],
[ 798.7875153 , 2859.51541392]])
>>> results.conf_int(cols=(2,3))
array([[-0.1115811 , 0.03994274],
[-3.12506664, -0.91539297]])
Notes
-----
The confidence interval is based on the standard normal distribution.
        Models that wish to use a different distribution should override this
        method.
"""
bse = self.bse
if self.use_t:
dist = stats.t
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
q = dist.ppf(1 - alpha / 2, df_resid)
else:
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = self.params[cols] - q * bse[cols]
upper = self.params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
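    # Usage sketch, assuming the illustrative OLS fit `results` from the
    # docstring example:
    #
    # >>> results.conf_int(alpha=0.10)     # 90% confidence intervals
    # >>> results.conf_int(cols=(1, 2))    # intervals for selected parameters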
def save(self, fname, remove_data=False):
'''
save a pickle of this instance
Parameters
----------
fname : string or filehandle
fname can be a string to a file path or filename, or a filehandle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
Notes
-----
If remove_data is true and the model result does not implement a
remove_data method then this will raise an exception.
'''
from statsmodels.iolib.smpickle import save_pickle
if remove_data:
self.remove_data()
save_pickle(self, fname)
@classmethod
def load(cls, fname):
'''
load a pickle, (class method)
Parameters
----------
fname : string or filehandle
fname can be a string to a file path or filename, or a filehandle.
Returns
-------
unpickled instance
'''
from statsmodels.iolib.smpickle import load_pickle
return load_pickle(fname)
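    # Round-trip sketch; the file name is illustrative and remove_data=True
    # drops the nobs-length arrays before pickling:
    #
    # >>> from statsmodels.regression.linear_model import OLSResults
    # >>> results.save("ols_results.pickle", remove_data=True)
    # >>> restored = OLSResults.load("ols_results.pickle")
    # >>> restored.params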
def remove_data(self):
'''remove data arrays, all nobs arrays from result and model
This reduces the size of the instance, so it can be pickled with less
memory. Currently tested for use with predict from an unpickled
results and model instance.
.. warning:: Since data and some intermediate results have been removed
calculating new statistics that require them will raise exceptions.
The exception will occur the first time an attribute is accessed
that has been set to None.
Not fully tested for time series models, tsa, and might delete too much
for prediction or not all that would be possible.
The list of arrays to delete is maintained as an attribute of the
result and model instance, except for cached values. These lists could
be changed before calling remove_data.
'''
def wipe(obj, att):
#get to last element in attribute path
p = att.split('.')
att_ = p.pop(-1)
try:
obj_ = reduce(getattr, [obj] + p)
#print(repr(obj), repr(att))
#print(hasattr(obj_, att_))
if hasattr(obj_, att_):
#print('removing3', att_)
setattr(obj_, att_, None)
except AttributeError:
pass
model_attr = ['model.' + i for i in self.model._data_attr]
for att in self._data_attr + model_attr:
#print('removing', att)
wipe(self, att)
data_in_cache = getattr(self, 'data_in_cache', [])
data_in_cache += ['fittedvalues', 'resid', 'wresid']
for key in data_in_cache:
try:
self._cache[key] = None
except (AttributeError, KeyError):
pass
class LikelihoodResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'params': 'columns',
'bse': 'columns',
'pvalues': 'columns',
'tvalues': 'columns',
'resid': 'rows',
'fittedvalues': 'rows',
'normalized_cov_params': 'cov',
}
_wrap_attrs = _attrs
_wrap_methods = {
'cov_params': 'cov',
'conf_int': 'columns'
}
wrap.populate_wrapper(LikelihoodResultsWrapper,
LikelihoodModelResults)
class ResultMixin(object):
@cache_readonly
def df_modelwc(self):
# collect different ways of defining the number of parameters, used for
# aic, bic
if hasattr(self, 'df_model'):
if hasattr(self, 'hasconst'):
hasconst = self.hasconst
else:
# default assumption
hasconst = 1
return self.df_model + hasconst
else:
return self.params.size
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * (self.df_modelwc)
@cache_readonly
def bic(self):
return -2 * self.llf + np.log(self.nobs) * (self.df_modelwc)
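    # Worked arithmetic for the two criteria above (numbers are illustrative):
    # with llf = -50.0, df_modelwc = 3 and nobs = 100,
    #   aic = -2*(-50.0) + 2*3           = 106.0
    #   bic = -2*(-50.0) + log(100)*3    ~= 100 + 13.82 = 113.82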
@cache_readonly
def score_obsv(self):
'''cached Jacobian of log-likelihood
'''
return self.model.score_obs(self.params)
jacv = np.deprecate(score_obsv, 'jacv', 'score_obsv',
"Use score_obsv attribute."
" jacv will be removed in 0.7.")
@cache_readonly
def hessv(self):
'''cached Hessian of log-likelihood
'''
return self.model.hessian(self.params)
@cache_readonly
def covjac(self):
'''
covariance of parameters based on outer product of jacobian of
log-likelihood
'''
## if not hasattr(self, '_results'):
## raise ValueError('need to call fit first')
## #self.fit()
## self.jacv = jacv = self.jac(self._results.params)
jacv = self.score_obsv
return np.linalg.inv(np.dot(jacv.T, jacv))
@cache_readonly
def covjhj(self):
'''covariance of parameters based on HJJH
dot product of Hessian, Jacobian, Jacobian, Hessian of likelihood
name should be covhjh
'''
jacv = self.score_obsv
hessv = self.hessv
hessinv = np.linalg.inv(hessv)
## self.hessinv = hessin = self.cov_params()
return np.dot(hessinv, np.dot(np.dot(jacv.T, jacv), hessinv))
@cache_readonly
def bsejhj(self):
'''standard deviation of parameter estimates based on covHJH
'''
return np.sqrt(np.diag(self.covjhj))
@cache_readonly
def bsejac(self):
'''standard deviation of parameter estimates based on covjac
'''
return np.sqrt(np.diag(self.covjac))
def bootstrap(self, nrep=100, method='nm', disp=0, store=1):
"""simple bootstrap to get mean and variance of estimator
see notes
Parameters
----------
nrep : int
number of bootstrap replications
method : str
optimization method to use
disp : bool
If true, then optimization prints results
store : bool
If true, then parameter estimates for all bootstrap iterations
are attached in self.bootstrap_results
Returns
-------
mean : array
mean of parameter estimates over bootstrap replications
std : array
standard deviation of parameter estimates over bootstrap
replications
Notes
-----
This was mainly written to compare estimators of the standard errors of
the parameter estimates. It uses independent random sampling from the
original endog and exog, and therefore is only correct if observations
are independently distributed.
This will be moved to apply only to models with independently
distributed observations.
"""
results = []
print(self.model.__class__)
hascloneattr = True if hasattr(self, 'cloneattr') else False
for i in range(nrep):
rvsind = np.random.randint(self.nobs, size=self.nobs)
#this needs to set startparam and get other defining attributes
#need a clone method on model
fitmod = self.model.__class__(self.endog[rvsind],
self.exog[rvsind, :])
if hascloneattr:
for attr in self.model.cloneattr:
setattr(fitmod, attr, getattr(self.model, attr))
fitres = fitmod.fit(method=method, disp=disp)
results.append(fitres.params)
results = np.array(results)
if store:
self.bootstrap_results = results
return results.mean(0), results.std(0), results
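    # Usage sketch, assuming a fitted GenericLikelihoodModel-style results
    # instance `res` whose model exposes endog/exog as required above:
    #
    # >>> mean, std, reps = res.bootstrap(nrep=50, method='nm', disp=0, store=1)
    # >>> std          # bootstrap standard errors, to compare with res.bse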
def get_nlfun(self, fun):
#I think this is supposed to get the delta method that is currently
#in miscmodels count (as part of Poisson example)
pass
class GenericLikelihoodModelResults(LikelihoodModelResults, ResultMixin):
"""
A results class for the discrete dependent variable models.
    .. warning::
The following description has not been updated to this version/class.
Where are AIC, BIC, ....? docstring looks like copy from discretemod
Parameters
----------
model : A DiscreteModel instance
mlefit : instance of LikelihoodResults
This contains the numerical optimization results as returned by
        LikelihoodModel.fit(), in a superclass of GenericLikelihoodModels
Returns
-------
*Attributes*
Warning most of these are not available yet
aic : float
Akaike information criterion. -2*(`llf` - p) where p is the number
of regressors including the intercept.
bic : float
Bayesian information criterion. -2*`llf` + ln(`nobs`)*p where p is the
number of regressors including the intercept.
bse : array
The standard errors of the coefficients.
df_resid : float
See model definition.
df_model : float
See model definition.
fitted_values : array
Linear predictor XB.
llf : float
Value of the loglikelihood
llnull : float
Value of the constant-only loglikelihood
llr : float
Likelihood ratio chi-squared statistic; -2*(`llnull` - `llf`)
llr_pvalue : float
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`.
prsquared : float
McFadden's pseudo-R-squared. 1 - (`llf`/`llnull`)
"""
def __init__(self, model, mlefit):
self.model = model
self.endog = model.endog
self.exog = model.exog
self.nobs = model.endog.shape[0]
# TODO: possibly move to model.fit()
# and outsource together with patching names
if hasattr(model, 'df_model'):
self.df_model = model.df_model
else:
self.df_model = len(mlefit.params)
# retrofitting the model, used in t_test TODO: check design
self.model.df_model = self.df_model
if hasattr(model, 'df_resid'):
self.df_resid = model.df_resid
else:
self.df_resid = self.endog.shape[0] - self.df_model
# retrofitting the model, used in t_test TODO: check design
self.model.df_resid = self.df_resid
self._cache = resettable_cache()
self.__dict__.update(mlefit.__dict__)
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
        ----------
yname : string, optional
Default is `y`
xname : list of strings, optional
            Default is `var_##` where ## indexes the p regressors
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Maximum Likelihood']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), # [self.df_resid]),
('Df Model:', None), # [self.df_model])
]
top_right = [ # ('R-squared:', ["%#8.3f" % self.rsquared]),
# ('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
# ('F-statistic:', ["%#8.4g" % self.fvalue] ),
# ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None), # ["%#6.4g" % self.llf]),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=False)
return smry
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tseries/timedeltas.py | 7 | 6047 | """
timedelta support tools
"""
import numpy as np
import pandas as pd
import pandas.tslib as tslib
from pandas.types.common import (_ensure_object,
is_integer_dtype,
is_timedelta64_dtype,
is_list_like)
from pandas.types.generic import ABCSeries, ABCIndexClass
from pandas.util.decorators import deprecate_kwarg
@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
mapping={True: 'coerce', False: 'raise'})
def to_timedelta(arg, unit='ns', box=True, errors='raise', coerce=None):
"""
Convert argument to timedelta
Parameters
----------
arg : string, timedelta, list, tuple, 1-d array, or Series
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an
integer/float number
box : boolean, default True
- If True returns a Timedelta/TimedeltaIndex of the results
- if False returns a np.timedelta64 or ndarray of values of dtype
timedelta64[ns]
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
Returns
-------
ret : timedelta64/arrays of timedelta64 if parsing succeeded
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02',
'00:00:03', '00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
"""
unit = _validate_timedelta_unit(unit)
if errors not in ('ignore', 'raise', 'coerce'):
        raise ValueError("errors must be one of 'ignore', "
                         "'raise', or 'coerce'")
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
from pandas import Series
values = _convert_listlike(arg._values, unit=unit,
box=False, errors=errors)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, box=box,
errors=errors, name=arg.name)
elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1:
return _convert_listlike(arg, unit=unit, box=box, errors=errors)
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, timedelta, list, tuple, '
'1-d array, or Series')
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit,
box=box, errors=errors)
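# Sketch of the `errors` options on a partly unparseable input; the exact
# reprs below are indicative only:
#
# >>> pd.to_timedelta(['1 days', 'not a delta'], errors='coerce')
# TimedeltaIndex(['1 days', NaT], dtype='timedelta64[ns]', freq=None)
# >>> pd.to_timedelta(['1 days', 'not a delta'], errors='ignore')
# array(['1 days', 'not a delta'], dtype=object)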
_unit_map = {
'Y': 'Y',
'y': 'Y',
'W': 'W',
'w': 'W',
'D': 'D',
'd': 'D',
'days': 'D',
'Days': 'D',
'day': 'D',
'Day': 'D',
'M': 'M',
'H': 'h',
'h': 'h',
'm': 'm',
'T': 'm',
'S': 's',
's': 's',
'L': 'ms',
'MS': 'ms',
'ms': 'ms',
'US': 'us',
'us': 'us',
'NS': 'ns',
'ns': 'ns',
}
def _validate_timedelta_unit(arg):
""" provide validation / translation for timedelta short units """
try:
return _unit_map[arg]
except:
if arg is None:
return 'ns'
raise ValueError("invalid timedelta unit {0} provided".format(arg))
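# Sketch of the alias translation performed above:
#
# >>> _validate_timedelta_unit('T')    # 'T' maps to minutes
# 'm'
# >>> _validate_timedelta_unit('L')    # 'L' maps to milliseconds
# 'ms'
# >>> _validate_timedelta_unit(None)   # default resolution
# 'ns'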
def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
"""Convert string 'r' to a timedelta object."""
try:
result = tslib.convert_to_timedelta64(r, unit)
except ValueError:
if errors == 'raise':
raise
elif errors == 'ignore':
return r
# coerce
result = pd.NaT
if box:
result = tslib.Timedelta(result)
return result
def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'):
arg = np.array(list(arg), dtype='O')
# these are shortcut-able
if is_timedelta64_dtype(arg):
value = arg.astype('timedelta64[ns]')
elif is_integer_dtype(arg):
value = arg.astype('timedelta64[{0}]'.format(
unit)).astype('timedelta64[ns]', copy=False)
else:
try:
value = tslib.array_to_timedelta64(_ensure_object(arg),
unit=unit, errors=errors)
value = value.astype('timedelta64[ns]', copy=False)
except ValueError:
if errors == 'ignore':
return arg
else:
# This else-block accounts for the cases when errors='raise'
# and errors='coerce'. If errors == 'raise', these errors
# should be raised. If errors == 'coerce', we shouldn't
# expect any errors to be raised, since all parsing errors
# cause coercion to pd.NaT. However, if an error / bug is
# introduced that causes an Exception to be raised, we would
# like to surface it.
raise
if box:
from pandas import TimedeltaIndex
value = TimedeltaIndex(value, unit='ns', name=name)
return value
| apache-2.0 |
redreamality/daft | examples/galex.py | 7 | 2540 | """
The GALEX Photon Catalog
========================
This is the Hogg \& Schiminovich model for how photons turn into
counts in the GALEX satellite data stream. Note the use of relative
positioning.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
pgm = daft.PGM([5.4, 5.4], origin=[1.2, 1.2])
wide = 1.5
verywide = 1.5 * wide
dy = 0.75
# electrons
el_x, el_y = 2., 2.
pgm.add_plate(daft.Plate([el_x - 0.6, el_y - 0.6, 2.2, 2 * dy + 0.3], label="electrons $i$"))
pgm.add_node(daft.Node("xabc", r"xa$_i$,xabc$_i$,ya$_i$,\textit{etc}", el_x + 0.5, el_y + 0 * dy, aspect=2.3 * wide, observed=True))
pgm.add_node(daft.Node("xyti", r"$x_i,y_i,t_i$", el_x + 1., el_y + 1 * dy, aspect=wide))
pgm.add_edge("xyti", "xabc")
# intensity fields
ph_x, ph_y = el_x + 2.5, el_y + 3 * dy
pgm.add_node(daft.Node("Ixyt", r"$I_{\nu}(x,y,t)$", ph_x, ph_y, aspect=verywide))
pgm.add_edge("Ixyt", "xyti")
pgm.add_node(daft.Node("Ixnt", r"$I_{\nu}(\xi,\eta,t)$", ph_x, ph_y + 1 * dy, aspect=verywide))
pgm.add_edge("Ixnt", "Ixyt")
pgm.add_node(daft.Node("Iadt", r"$I_{\nu}(\alpha,\delta,t)$", ph_x, ph_y + 2 * dy, aspect=verywide))
pgm.add_edge("Iadt", "Ixnt")
# s/c
sc_x, sc_y = ph_x + 1.5, ph_y - 1.5 * dy
pgm.add_node(daft.Node("dark", r"dark", sc_x, sc_y - 1 * dy, aspect=wide))
pgm.add_edge("dark", "xyti")
pgm.add_node(daft.Node("flat", r"flat", sc_x, sc_y, aspect=wide))
pgm.add_edge("flat", "xyti")
pgm.add_node(daft.Node("att", r"att", sc_x, sc_y + 3 * dy))
pgm.add_edge("att", "Ixnt")
pgm.add_node(daft.Node("optics", r"optics", sc_x, sc_y + 2 * dy, aspect=wide))
pgm.add_edge("optics", "Ixyt")
pgm.add_node(daft.Node("psf", r"psf", sc_x, sc_y + 1 * dy))
pgm.add_edge("psf", "xyti")
pgm.add_node(daft.Node("fee", r"f.e.e.", sc_x, sc_y - 2 * dy, aspect=wide))
pgm.add_edge("fee", "xabc")
# sky
pgm.add_node(daft.Node("sky", r"sky", sc_x, sc_y + 4 * dy))
pgm.add_edge("sky", "Iadt")
# stars
star_x, star_y = el_x, el_y + 4 * dy
pgm.add_plate(daft.Plate([star_x - 0.6, star_y - 0.6, 2.2, 2 * dy + 0.3], label="stars $n$"))
pgm.add_node(daft.Node("star adt", r"$I_{\nu,n}(\alpha,\delta,t)$", star_x + 0.5, star_y + 1 * dy, aspect=verywide))
pgm.add_edge("star adt", "Iadt")
pgm.add_node(daft.Node("star L", r"$L_{\nu,n}(t)$", star_x + 1, star_y, aspect=wide))
pgm.add_edge("star L", "star adt")
pgm.add_node(daft.Node("star pos", r"$\vec{x_n}$", star_x, star_y))
pgm.add_edge("star pos", "star adt")
# done
pgm.render()
pgm.figure.savefig("galex.pdf")
pgm.figure.savefig("galex.png", dpi=150)
| mit |
rabipanda/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 4 | 12759 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
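# Toy sketch of build_dataset on a tiny corpus (values are illustrative, and
# tie-breaking of equally common words follows collections.Counter order):
#
# >>> d, c, dct, rev = build_dataset(['the', 'cat', 'sat', 'the', 'cat'], 3)
# >>> c        # 'UNK' count plus the two most common words
# [['UNK', 1], ('the', 2), ('cat', 2)]
# >>> d        # 'sat' is out of vocabulary, so it maps to code 0 (UNK)
# [1, 2, 0, 1, 2]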
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(
vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(
perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
dhruv13J/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
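# A short follow-up sketch: the fitted model can also label new points
# directly; the coordinates below are illustrative:
#
# new_points = np.array([[0.0, 0.0], [1.0, 0.0]])
# print(label_spread.predict(new_points))   # expected inner / outer labels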
| bsd-3-clause |
Windy-Ground/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
    # regression test for missing sorting of sample weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
tttor/csipb-jamu-prj | predictor/connectivity/classifier/selfblm/devel.py | 1 | 5474 | #!/usr/bin/python
import numpy as np
import json
import time
import sys
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import MinMaxScaler
sys.path.append('../../utility')
import yamanishi_data_util as yam
sys.path.append('../cluster/kmedoid')
import kmedoid
from generateNegativeData import genNegativeData
from selfblm import SELFBLM
def main():
if len(sys.argv)!=5:
print ("python blmniisvm_experiment.py [DataSetCode] [evalMode]"
" [dataPath] [outPath]")
return
classParam = dict(name='blmnii',proba=True)
dataset = sys.argv[1]
evalMode = sys.argv[2]
dataPath = sys.argv[3]
outPath = sys.argv[4]
print "Building Data"
connMat,comList,proList = yam.loadComProConnMat(dataset,dataPath+"/Adjacency")
kernel = yam.loadKernel(dataset,dataPath)
comListIdx = [i for i,_ in enumerate(comList)]
proListIdx = [i for i,_ in enumerate(proList)]
nComp = len(comList)
nProtein = len(proList)
comSimMat = np.zeros((nComp,nComp), dtype=float)
proSimMat = np.zeros((nProtein,nProtein), dtype=float)
for row,i in enumerate(comList):
for col,j in enumerate(comList):
comSimMat[row][col] = (kernel[(i,j)]+kernel[(j,i)])/2
for row,i in enumerate(proList):
for col,j in enumerate(proList):
proSimMat[row][col] = (kernel[(i,j)]+kernel[(j,i)])/2
comSimMat = regularizationKernel(comSimMat)
proSimMat = regularizationKernel(proSimMat)
print "Clustering"
comDisMat = kmedoid.simToDis(comSimMat)
proDisMat = kmedoid.simToDis(proSimMat)
_,proClust = kmedoid.kMedoids(len(proList)/2, proDisMat)
_,comClust = kmedoid.kMedoids(len(comList)/2, comDisMat)
print "Generate Negative Data"
connMat = genNegativeData(connMat,proClust,comClust)
# PLACEHOLDER
# Split Data
pairData = []
connList = []
print "Split Dataset..."
if evalMode == "loocv":
nFold = len(comListIdx)
kSplit = KFold(n_splits=nFold,shuffle=True)
comSplit = kSplit.split(comListIdx)
nFold = len(proListIdx)
kSplit = KFold(n_splits=nFold,shuffle=True)
proSplit = kSplit.split(proListIdx)
elif evalMode == "kfold":
nFold = 10
kSplit = KFold(n_splits=nFold, shuffle=True)
comSplit = kSplit.split(comListIdx)
proSplit = kSplit.split(proListIdx)
else:
assert(False)
predictedData = np.zeros((len(comList),len(proList)),dtype=float)
splitPred = []
proTestList = []
proTrainList = []
comTestList = []
comTrainList = []
for trainIndex, testIndex in proSplit:
proTestList.append([i for i in testIndex])
proTrainList.append([i for i in trainIndex])
for trainIndex, testIndex in comSplit:
comTestList.append([i for i in testIndex])
comTrainList.append([i for i in trainIndex])
predRes = []
testData = []
print "Predicting..."
for ii,i in enumerate(comTestList):
for jj,j in enumerate(proTestList):
sys.stdout.write("\r%03d of %03d||%03d of %03d" %
(jj+1, len(proTestList), ii+1,len(comTestList),))
sys.stdout.flush()
predictor = SELFBLM(classParam, connMat, comSimMat, proSimMat,
[comTrainList[ii],proTrainList[jj]],[i,j])
for comp in i:
for prot in j:
predRes.append(predictor.predict([(comp,prot)]))
if connMat[comp][prot] == 1:
testData.append(1)
else:
testData.append(-1)
# run core selfBLM
# Evaluate prediction
print "\nCalculate Performance"
key = 'PredictionUsingSelfBLM'
precision, recall, _ = precision_recall_curve(testData, predRes)
prAUC = average_precision_score(testData, predRes, average='micro')
print "Visualiation"
lineType = 'k-.'
perf = {'precision': precision, 'recall': recall, 'prAUC': prAUC,
'lineType': lineType}
perf2 = {'prAUC': prAUC, 'nTest': nComp*nProtein}
with open(outPath+'perf_selfblm_'+evalMode+'_'+dataset+'_perf.json', 'w') as fp:
json.dump(perf2, fp, indent=2, sort_keys=True)
plt.clf()
plt.figure()
plt.plot(perf['recall'], perf['precision'], perf['lineType'], label= key+' (area = %0.2f)' % perf['prAUC'], lw=2)
plt.ylim([-0.05, 1.05])
plt.xlim([-0.05, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall Curve')
plt.legend(loc="lower left")
plt.savefig(outPath+'/pr_curve_'+ dataset +'_'+evalMode+'_selfblm.png', bbox_inches='tight')
# http://stackoverflow.com/questions/29644180/gram-matrix-kernel-in-svms-not-positive-semi-definite?rq=1
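# The averaged similarity matrices built in main() are symmetric but not
# guaranteed to be positive semi-definite, so regularizationKernel() keeps
# adding a small ridge (eps * I) to the diagonal until the smallest eigenvalue
# is numerically non-negative.  Illustrative sketch of the same idea (toy
# matrix, not from this project; assumes numpy imported as np):
#   mat = np.array([[1.0, 0.99], [0.99, 0.9]])   # det < 0  -> not PSD
#   while np.linalg.eigvalsh(mat).min() < -1e-8:
#       mat += 0.1 * np.eye(len(mat))            # PSD after one step here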
def regularizationKernel(mat):
eps = 0.1
m,n = mat.shape
assert(m==n)
while isPSDKernel(mat) == False:
for i in range(m):
mat[i][i] = mat[i][i] + eps
return mat
def isPSDKernel(mat,eps = 1e-8):
E,V = np.linalg.eigh(mat)
return np.all(E > -eps) and np.all(np.isreal(E))
if __name__ == '__main__':
start_time = time.time()
main()
print "Program is running for :"+str(time.time()-start_time)
| mit |
appapantula/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
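# threshold=1.7 caps the radius of each CF subcluster; with n_clusters=None the
# raw subclusters are returned as labels, while n_clusters=100 adds the final
# global clustering step (AgglomerativeClustering by default) that merges the
# subclusters down to 100 clusters.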
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
Averroes/statsmodels | statsmodels/graphics/tests/test_factorplots.py | 27 | 1513 | import numpy as np
from nose import SkipTest
from pandas import Series
from statsmodels.graphics.factorplots import interaction_plot
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
class TestInteractionPlot(object):
@classmethod
def setupClass(cls):
if not have_matplotlib:
raise SkipTest('matplotlib not available')
np.random.seed(12345)
cls.weight = np.random.randint(1,4,size=60)
cls.duration = np.random.randint(1,3,size=60)
cls.days = np.log(np.random.randint(1,30, size=60))
def test_plot_both(self):
fig = interaction_plot(self.weight, self.duration, self.days,
colors=['red','blue'], markers=['D','^'], ms=10)
plt.close(fig)
def test_plot_rainbow(self):
fig = interaction_plot(self.weight, self.duration, self.days,
markers=['D','^'], ms=10)
plt.close(fig)
def test_plot_pandas(self):
weight = Series(self.weight, name='Weight')
duration = Series(self.duration, name='Duration')
days = Series(self.days, name='Days')
fig = interaction_plot(weight, duration, days,
markers=['D','^'], ms=10)
ax = fig.axes[0]
trace = ax.get_legend().get_title().get_text()
assert trace == 'Duration'
assert ax.get_ylabel() == 'mean of Days'
assert ax.get_xlabel() == 'Weight'
plt.close(fig)
| bsd-3-clause |
GORDON17/py_rrfm_re | server/services/similarity_service_backup.py | 2 | 13374 | import os, json, gc
import pandas as pd
import numpy as np
from scipy.spatial import distance
from scipy.stats import pearsonr
from urllib2 import Request, urlopen
from pandas.io.json import json_normalize
from configurations.env_configs import *
# def interests_sim(id):
# df_new = _matrix()
# df_profile_t = df_new[df_new.columns[13:]]
# print df_profile_t.shape
# index = _index_of(id, df_new)
# profile_d = distance.pdist(df_profile_t, metric='matching')
# profile_D = distance.squareform(profile_d)
# p_r = profile_D[index].tolist()
# sim = pd.Series(p_r)
# df_new['count_of_interests'] = (1 - sim.values) * len(df_profile_t.columns)
# df_new['count_of_interests'] = df_new['count_of_interests'].astype(int)
# List = [i[0] for i in sorted(enumerate(p_r), key=lambda x:x[1])][0:21]
# return df_new.ix[List][['V2', 'count_of_interests']]#['V2']
# def interests_sim_with_loc(id, location, uri):
# # df_new = _matrix_with_loc(location)
# # df_profile_t = df_new[df_new.columns[14:]]
# df_new = _interests_matrix_with_loc(location, uri)
# df_profile_t = df_new[df_new.columns[2:]]
# index = _index_of(id, df_new)
# profile_d = distance.pdist(df_profile_t, metric='matching')
# profile_D = distance.squareform(profile_d)
# p_r = profile_D[index].tolist()
# del profile_d
# del profile_D
# gc.collect()
# sim = pd.Series(p_r)
# df_new['interest_similarity'] = 1 - sim.values
# df_new['interest_count'] = (1 - sim.values) * len(df_profile_t.columns)
# df_new['interest_count'] = df_new['interest_count'].astype(int)
# # List = [i[0] for i in sorted(enumerate(p_r), key=lambda x:x[1])][0:21]
# # return df_new.ix[List][['account_id', 'interest_count']]#['V2']
# return df_new
def events_sim(id):
df_new = _matrix()
df_event_t = df_new[df_new.columns[2:13]]
index = _index_of(id, df_new)
df_event_t = df_event_t.div(df_event_t.sum(axis=1), axis=0)
# event_D = np.corrcoef(df_event_t)
# e_r = event_D[index].tolist()
# sim = pd.Series(e_r)
# df_new['similarity_percentage'] = sim.values
# List = [i[0] for i in sorted(enumerate(e_r), key=lambda x:x[1], reverse=True)][0:21]
# print event_D.shape
event_d = distance.pdist(df_event_t, metric='euclidean')
event_D = distance.squareform(event_d)
e_r = event_D[index].tolist()
sim = pd.Series(e_r)
df_new['similarity_percentage'] = 1 - sim.values
List = [i[0] for i in sorted(enumerate(e_r), key=lambda x:x[1])][0:21]
return df_new.ix[List][['V2', 'similarity_percentage']]#['V2']
def events_sim_with_loc(id, location, uri):
df_new = _events_matrix_with_loc(location, uri)
df_event_t = df_new[df_new.columns[2:]]
index = _index_of(id, df_new)
df_event_t = df_event_t.div(df_event_t.sum(axis=1), axis=0)
# event_D = np.corrcoef(df_event_t)
# e_r = event_D[index].tolist()
# sim = pd.Series(e_r)
# df_new['similarity_percentage'] = sim.values
# List = [i[0] for i in sorted(enumerate(e_r), key=lambda x:x[1], reverse=True)][0:21]
# print event_D.shape
event_d = distance.pdist(df_event_t, metric='correlation')
event_D = distance.squareform(event_d)
del df_event_t
del event_d
e_r = event_D[index].tolist()
del event_D
gc.collect()
sim = pd.Series(e_r)
df_new['similarity_percentage'] = 1 - sim.values
# List = [i[0] for i in sorted(enumerate(e_r), key=lambda x:x[1])][0:21]
# return df_new.ix[List][['account_id', 'email', 'similarity_percentage']]#['V2']
return df_new
def _matrix_with_loc(location):
df_profile = _social_interests_data()
df_profile[['account_id']] = df_profile[['account_id']].astype(int)
df_profile['location'].fillna('empty', inplace=True)
df_profile = pd.pivot_table(df_profile, index=['account_id', 'location'], columns='social', values='indicator')
df_profile_t = df_profile.reset_index()
df_profile_t = df_profile_t.fillna(value=0)
df_profile_t = df_profile_t[(df_profile_t.location == '') | (df_profile_t.location == 'empty') | (df_profile_t.location.str.contains(location))]
df_event = _event_types_data()
df_event[['account_id']] = df_event[['account_id']].astype(int)
df_event[['count']] = df_event[['count']].astype(int)
df_event[['event_type']] = df_event[['event_type']].astype(int)
df_event = pd.pivot_table(df_event, index=['account_id', 'email'], columns='event_type', values='count')
df_event_t = df_event.reset_index()
df_event_t = df_event_t.fillna(value=0)
df_new = df_event_t.set_index('account_id').join(df_profile_t.set_index('account_id'))
df_new = df_new.dropna()
df_new = df_new.sort_values(by='email', ascending=False)
df_new = df_new.reset_index()
print df_new[df_new.account_id == 28071].index.tolist()[0]
return df_new
def _matrix():
df_profile = pd.read_csv(_datasets_path() + 'profile dataset.csv', names=['V1', 'V2', 'V3'])
df_profile = df_profile.pivot(index='V1', columns='V2', values='V3')
df_profile_t = df_profile.reset_index()
df_profile_t = df_profile_t.fillna(value=0)
print(df_profile_t.shape)
df_event = pd.read_csv(_datasets_path() + 'query event type.csv', names=['V1', 'V2', 'V3', 'V4'])
df_event = pd.pivot_table(df_event, index=['V1', 'V2'], columns='V3', values='V4')
df_event_t = df_event.reset_index()
df_event_t = df_event_t.fillna(value=0)
print(df_event_t.shape)
df_new = df_event_t.set_index('V1').join(df_profile_t.set_index('V1'))
print df_new.shape
df_new = df_new.dropna()
df_new = df_new.sort_values(by='V2', ascending=False)
df_new = df_new.reset_index()
print df_new.shape
return df_new
def _index_of(id, df):
return df[df.account_id == id].index.tolist()[0]
def _datasets_path():
return os.path.abspath("") + "/datasets/"
# def _social_interests_data():
# print ("Sending request to:", SOCIAL_INTERESTS_URI)
# request=Request(SOCIAL_INTERESTS_URI)
# profiles = json.loads(urlopen(request).read())
# df = pd.DataFrame(profiles)
# print df.shape
# return df
# def _event_types_data():
# print ("Sending request to:", EVENT_TYPES_URI)
# request=Request(EVENT_TYPES_URI)
# events = json.loads(urlopen(request).read())
# df = pd.DataFrame(events)
# print df.shape
# return df
def _request_data(uri):
print ("Sending request to:", uri)
request = Request(uri)
request.add_header('HTTP_X_IVY_SESSION_TOKEN', RAILS_TOKEN)
data = json.loads(urlopen(request).read())
df = pd.DataFrame(data)
print ("Requested data shape:", df.shape)
return df
# def _interests_matrix_with_loc(location, uri):
# df_profile = _request_data(uri)
# df_profile[['account_id']] = df_profile[['account_id']].astype(int)
# df_profile['location'].fillna('empty', inplace=True)
# df_profile_t = pd.pivot_table(df_profile, index=['account_id', 'location'], columns=['social'], values='indicator')
# del df_profile
# gc.collect()
# df_profile_t = df_profile_t.reset_index()
# df_profile_t = df_profile_t.fillna(value=0)
# df_profile_t = df_profile_t[(df_profile_t.location == '') | (df_profile_t.location == 'empty') | (df_profile_t.location.str.contains(location))]
# df_profile_f = df_profile_t.dropna()
# del df_profile_t
# gc.collect()
# df_profile_s = df_profile_f.sort_values(by='account_id', ascending=True)
# del df_profile_f
# gc.collect()
# df_profile_s.reset_index(drop=True, inplace=True)
# return df_profile_s
def _events_matrix_with_loc(location, uri):
df_event = _request_data(uri)
df_event[['account_id']] = df_event[['account_id']].astype(int)
df_event[['count']] = df_event[['count']].astype(int)
df_event[['event_type']] = df_event[['event_type']].astype(int)
df_event_t = pd.pivot_table(df_event, index=['account_id', 'email'], columns='event_type', values='count')
del df_event
gc.collect()
df_event_t.reset_index(inplace=True)
df_event_t = df_event_t.fillna(value=0)
df_event_s = df_event_t.sort_values(by='account_id', ascending=True)
del df_event_t
gc.collect()
df_event_s.reset_index(drop=True, inplace=True)
return df_event_s
# interest similarity #
# from configurations.env_configs import *
# from mongodb import update_interests_table
# from account_service import *
# def process_interest_similarity(uri):
# # df_accounts = _request_data(ACCOUNTS_URI)
# # df_accounts['location'].fillna('empty', inplace=True)
# df_accounts = get_accounts()
# df_profile = _request_data(uri)
# size = df_profile.size()
# for index, account in df_accounts.iterrows():
# if not df_profile[df_profile.account_id == account.id].empty:
# size = size - 1
# print("interest similarity account left", size)
# print("processing interest similarity for account: ", account.id)
# df = _interests_sim_with_loc(account, df_profile)
# update_interests_table(account.id, df, INTEREST_TYPES['social'])
# print("finished interest similarity for account: ", account.id)
# def _interests_sim_with_loc(account, df_profile):
# # df_new = _matrix_with_loc(location)
# # df_profile_t = df_new[df_new.columns[14:]]
# df_new = _interests_matrix_with_loc(account.location, df_profile)
# df_profile_t = df_new[df_new.columns[2:]]
# index = _index_of(account.id, df_new)
# profile_d = distance.pdist(df_profile_t, metric='matching')
# profile_D = distance.squareform(profile_d)
# p_r = profile_D[index].tolist()
# del profile_d
# del profile_D
# gc.collect()
# sim = pd.Series(p_r)
# df_new['interest_similarity'] = 1 - sim.values
# df_new['interest_count'] = (1 - sim.values) * len(df_profile_t.columns)
# df_new['interest_count'] = df_new['interest_count'].astype(int)
# List = [i[0] for i in sorted(enumerate(p_r), key=lambda x:x[1])][0:21]
# return df_new.ix[List]
# # return df_new
# def _interests_matrix_with_loc(location, df):
# df_profile = df.copy()
# df_profile[['account_id']] = df_profile[['account_id']].astype(int)
# df_profile['location'].fillna('empty', inplace=True)
# df_profile_t = pd.pivot_table(df_profile, index=['account_id', 'location'], columns=['social'], values='indicator')
# del df_profile
# gc.collect()
# df_profile_t = df_profile_t.reset_index()
# df_profile_t = df_profile_t.fillna(value=0)
# df_profile_t = df_profile_t[(df_profile_t.location == '') | (df_profile_t.location == 'empty') | (df_profile_t.location.str.contains(location))]
# df_profile_f = df_profile_t.dropna()
# del df_profile_t
# gc.collect()
# df_profile_s = df_profile_f.sort_values(by='account_id', ascending=True)
# del df_profile_f
# gc.collect()
# df_profile_s.reset_index(drop=True, inplace=True)
# return df_profile_s
# refactor for performance
from mongodb import update_interests_table
def process_interest_similarity(uri, type, params):
df_profile = _request_data(uri)
structured_df = _manipulate_profile_matrix(df_profile)
del df_profile
gc.collect()
# df_profile_t = structured_df[structured_df.columns[4:]]
print ("Structured profile matrix shape:", structured_df[structured_df.columns[4:]].shape)
df_interest_sim = _calculate_similarity(structured_df[structured_df.columns[4:]])
profile_len = len(structured_df[structured_df.columns[4:]].columns)
# del df_profile_t
# gc.collect()
count = 1
prepared_df = structured_df[['account_id', 'location', 'nationality', 'chapter']].copy()
del structured_df
gc.collect()
for index, profile in prepared_df.iterrows():
sim_for_account = df_interest_sim[index].tolist()
sim_list = pd.Series(sim_for_account)
df = prepared_df.copy()
df['interest_similarity'] = 1 - sim_list.values
df['interest_count'] = (1 - sim_list.values) * profile_len
df['interest_count'] = df['interest_count'].astype(int)
df_profile_f = _filtered_profile_matrix(df, profile, params) #df[(df.location == '') | (df.location == 'empty') | (df.location.str.contains(profile.location))]
df_profile_r = df_profile_f.sort_values(by='interest_similarity', ascending=0)[1:10]
update_interests_table(profile.account_id, df_profile_r, type)
print('Processed interest similarity: ', count)
count += 1
del df_interest_sim
del prepared_df
gc.collect()
def _filtered_profile_matrix(df, profile, params):
df_copy = df.copy()
if params['location']:
df_copy = df_copy[(df_copy.location.str.contains(profile.location))] #(df_copy.location == '') | (df_copy.location == 'empty') |
if params['chapter']:
df_copy = df_copy[(df_copy.chapter == profile.chapter)]
if params['nationality']:
df_copy = df_copy[(df_copy.nationality == profile.nationality)]
return df_copy
def _manipulate_profile_matrix(df):
df_profile = df.copy()
df_profile[['account_id']] = df_profile[['account_id']].astype(int)
df_profile[['chapter']] = df_profile[['chapter']].astype(int)
df_profile['location'].fillna('empty', inplace=True)
df_profile['nationality'].fillna('empty', inplace=True)
df_profile['chapter'].fillna(0, inplace=True)
df_profile_t = pd.pivot_table(df_profile, index=['account_id', 'location', 'nationality', 'chapter'], columns=['social'], values='indicator')
del df_profile
gc.collect()
df_profile_t.reset_index(inplace=True)
df_profile_t.fillna(value=0, inplace=True)
df_profile_t.dropna(inplace=True)
df_profile_t.sort_values(by='account_id', ascending=True, inplace=True)
df_profile_t.reset_index(drop=True, inplace=True)
return df_profile_t
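# The 'matching' metric used below is the fraction of binary interest columns
# on which two accounts disagree, so (1 - distance) is the interest similarity
# stored by process_interest_similarity().  Illustrative example (not from the
# service data): rows [1, 0, 1, 1] and [1, 1, 1, 0] disagree on 2 of 4 columns,
# giving a distance of 0.5 and an interest_similarity of 0.5.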
def _calculate_similarity(df):
profile_d = distance.pdist(df, metric='matching')
profile_D = distance.squareform(profile_d)
del profile_d
gc.collect()
return profile_D
| mit |
yavalvas/yav_com | build/matplotlib/unit/threading_test.py | 1 | 1771 | #! /usr/bin/python
"""
Test by Karen Tracey for threading problem reported in
http://www.mail-archive.com/[email protected]/msg04819.html
and solved by JDH in git commit 175e3ec5bed9144.
"""
from __future__ import print_function
import os
import threading
import traceback
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
thread_count = 8
max_iterations = 50
exception_raised = False
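# Module-level sentinel shared by all worker threads: once any thread hits an
# exception it sets this flag and every other worker exits its loop early.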
def png_thread(tn):
png_fname = 'out%d.png' % tn
vals = 100 + 15 * np.random.randn(10000)
i = 0
excp = None
global exception_raised
while not exception_raised and i < max_iterations:
i += 1
png_f = open(png_fname, 'wb')
try:
fig = Figure()
ax = fig.add_subplot(111)
ax.hist(vals, 50)
FigureCanvas(fig).print_png(png_f)
        except Exception:
            excp = traceback.format_exc()
png_f.close()
if excp:
print('png_thread %d failed on iteration %d:' % (tn, i))
            print(excp)
exception_raised = True
else:
print('png_thread %d completed iteration %d.' % (tn, i))
os.unlink(png_fname)
def main(tc):
threads = []
for i in range(tc):
threads.append(threading.Thread(target=png_thread, args=(i+1,)))
for t in threads:
t.start()
for t in threads:
t.join()
if not exception_raised:
msg = 'Success! %d threads completed %d iterations with no exceptions raised.'
else:
msg = 'Failed! Exception raised before %d threads completed %d iterations.'
print(msg % (tc, max_iterations))
if __name__== "__main__":
main(thread_count)
| mit |
joeldg/bowhead | app/Scripts/close_prediction.py | 1 | 2487 | #!/usr/local/bin/python
import pandas as pd
import redis
import sys
from os.path import join, dirname
import os
import dotenv
import numpy as np
from sklearn.linear_model import LinearRegression
dotenv.load(join(dirname(__file__), '../../.env'))
r = redis.StrictRedis(host=os.environ.get('REDIS_HOST'), port=os.environ.get('REDIS_PORT'), db=0)
book = r.get('laravel:tempbook')
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
TDATA=StringIO(book)
df = pd.read_csv(TDATA)
to_forecast = df.close.values
dates = df.id.values
# mean absolute percentage error
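# e.g. predictions [110., 95.] against true values [100., 100.] give
# 100 * mean(0.10, 0.05) = 7.5% (illustrative numbers, not from this script)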
def mape(ypred, ytrue):
""" returns the mean absolute percentage error """
idx = ytrue != 0.0
return 100*np.mean(np.abs(ypred[idx]-ytrue[idx])/ytrue[idx])
def organize_data(to_forecast, window, horizon):
"""
Input:
to_forecast, univariate time series organized as numpy array
window, number of items to use in the forecast window
horizon, horizon of the forecast
Output:
X, a matrix where each row contains a forecast window
y, the target values for each row of X
"""
shape = to_forecast.shape[:-1] + (to_forecast.shape[-1] - window + 1, window)
strides = to_forecast.strides + (to_forecast.strides[-1],)
X = np.lib.stride_tricks.as_strided(to_forecast, shape=shape, strides=strides)
y = np.array([X[i+horizon][-1] for i in range(len(X)-horizon)])
return X[:-horizon], y
k = 4 # number of previous observations to use
h = 1 # forecast horizon
X,y = organize_data(to_forecast, k, h)
m = 10 # number of samples to take in account
regressor = LinearRegression(normalize=True)
regressor.fit(X[:m], y[:m])
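# The model is fit on the first m windows only; the remaining windows are held
# out and their one-step-ahead predictions are printed (comma separated) below.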
#print regressor.coef_
#print 'The error is:%0.2f%%' % mape(regressor.predict(X[m:]),y[m:])
#print y[m:]
#print regressor.predict(X[m:])
#print str(regressor.predict(X[m:])).strip('[]')
#print ', '.join(map(str, y[m:]))
# print out and pop off the last number for the prediction.
print ','.join(map(str, regressor.predict(X[m:])))
"""
http://glowingpython.blogspot.com/2015/01/forecasting-beer-consumption-with.html
figure(figsize=(8,6))
plot(y, label='True demand', color='#377EB8', linewidth=2)
plot(regressor.predict(X),
'--', color='#EB3737', linewidth=3, label='Prediction')
plot(y[:m], label='Train data', color='#3700B8', linewidth=2)
xticks(arange(len(dates))[1::4],dates[1::4], rotation=45)
legend(loc='upper right')
ylabel('beer consumed (millions of litres)')
show()
"""
| apache-2.0 |
zxc2694/STM32F429_Quadrotor | program/pythonGUI/ultrasonic.py | 1 | 2344 | ################################################################################
# File name: ultrasonic.py
#
# Function: Display the flight height of quadcopter from stm32f4 using Python (matplotlib)
#
# Reference:http://electronut.in/plotting-real-time-data-from-arduino-using-python/
#
################################################################################
import sys, serial
import numpy as np
from time import sleep
from collections import deque
from matplotlib import pyplot as plt
# class that holds analog data for N samples
class AnalogData:
# constr
def __init__(self, maxLen):
self.d = deque([0.0]*maxLen)
self.maxLen = maxLen
# ring buffer
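    # (the deque is pre-filled with maxLen zeros, so the else branch below
    # always runs: the oldest value is popped from the right end and the newest
    # sample is pushed onto the left end of the buffer)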
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
#Add new data
def add(self, data):
assert(len(data) == 1)
self.addToBuf(self.d, data[0])
# plot class
class AnalogPlot:
# constr
def __init__(self, analogData):
# set plot to animated
plt.ion()
plt.figure(figsize=(9,8))
self.dline, = plt.plot(analogData.d,label="Distance",color="red")
plt.xlabel("Time")
plt.ylabel("distance (cm)")
plt.title("Ultrasonic distance")
plt.legend() #Show label figure.
plt.ylim([0, 2000]) # Vertical axis scale.
plt.grid()
# update plot
def update(self, analogData):
self.dline.set_ydata(analogData.d)
plt.draw()
def main():
# expects 1 arg - serial port string
if(len(sys.argv) != 2):
print "Type:"
print "sudo chmod 777 /dev/ttyUSB0"
print "python ultrasonic.py '/dev/ttyUSB0'" # command hint
exit(1)
#strPort = '/dev/tty.usbserial-A7006Yqh'
strPort = sys.argv[1];
# plot parameters
analogData = AnalogData(200) # Horizontal axis scale.
analogPlot = AnalogPlot(analogData)
print "plotting data..."
a = 1
# open serial port
ser = serial.Serial(strPort, 9600) # set Baudrate
while True:
try:
line = ser.readline()
data = [float(val) for val in line.split()]
if (a < 10): # to ignore the wrong data that cause the failure while python GUI start
a = a + 1
else:
print data[0]
if(len(data) == 1): # number of data sets
analogData.add(data)
analogPlot.update(analogData)
except KeyboardInterrupt:
print "exiting"
break
# close serial
ser.flush()
ser.close()
# call main
if __name__ == '__main__':
main()
| mit |
louispotok/pandas | pandas/tests/indexes/interval/test_construction.py | 1 | 14167 | from __future__ import division
import pytest
import numpy as np
from functools import partial
from pandas import (
Interval, IntervalIndex, Index, Int64Index, Float64Index, Categorical,
CategoricalIndex, date_range, timedelta_range, period_range, notna)
from pandas.compat import lzip
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
import pandas.core.common as com
import pandas.util.testing as tm
@pytest.fixture(params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(params=[None, 'foo'])
def name(request):
return request.param
class Base(object):
"""
Common tests for all variations of IntervalIndex construction. Input data
to be supplied in breaks format, then converted by the subclass method
get_kwargs_from_breaks to the expected format.
"""
@pytest.mark.parametrize('breaks', [
[3, 14, 15, 92, 653],
np.arange(10, dtype='int64'),
Int64Index(range(-10, 11)),
Float64Index(np.arange(20, 30, 0.5)),
date_range('20180101', periods=10),
date_range('20180101', periods=10, tz='US/Eastern'),
timedelta_range('1 day', periods=10)])
def test_constructor(self, constructor, breaks, closed, name):
result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
result = constructor(closed=closed, name=name, **result_kwargs)
assert result.closed == closed
assert result.name == name
assert result.dtype.subtype == getattr(breaks, 'dtype', 'int64')
tm.assert_index_equal(result.left, Index(breaks[:-1]))
tm.assert_index_equal(result.right, Index(breaks[1:]))
@pytest.mark.parametrize('breaks, subtype', [
(Int64Index([0, 1, 2, 3, 4]), 'float64'),
(Int64Index([0, 1, 2, 3, 4]), 'datetime64[ns]'),
(Int64Index([0, 1, 2, 3, 4]), 'timedelta64[ns]'),
(Float64Index([0, 1, 2, 3, 4]), 'int64'),
(date_range('2017-01-01', periods=5), 'int64'),
(timedelta_range('1 day', periods=5), 'int64')])
def test_constructor_dtype(self, constructor, breaks, subtype):
# GH 19262: conversion via dtype parameter
expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
expected = constructor(**expected_kwargs)
result_kwargs = self.get_kwargs_from_breaks(breaks)
iv_dtype = IntervalDtype(subtype)
for dtype in (iv_dtype, str(iv_dtype)):
result = constructor(dtype=dtype, **result_kwargs)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
[np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
def test_constructor_nan(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_subtype = np.float64
expected_values = np.array(breaks[:-1], dtype=object)
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('breaks', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype='datetime64[ns]'),
np.array([], dtype='timedelta64[ns]')])
def test_constructor_empty(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_values = np.array([], dtype=object)
expected_subtype = getattr(breaks, 'dtype', np.int64)
assert result.empty
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('breaks', [
tuple('0123456789'),
list('abcdefghij'),
np.array(list('abcdefghij'), dtype=object),
np.array(list('abcdefghij'), dtype='<U1')])
def test_constructor_string(self, constructor, breaks):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with tm.assert_raises_regex(TypeError, msg):
constructor(**self.get_kwargs_from_breaks(breaks))
@pytest.mark.parametrize('cat_constructor', [
Categorical, CategoricalIndex])
def test_constructor_categorical_valid(self, constructor, cat_constructor):
# GH 21243/21253
if isinstance(constructor, partial) and constructor.func is Index:
# Index is defined to create CategoricalIndex from categorical data
pytest.skip()
breaks = np.arange(10, dtype='int64')
expected = IntervalIndex.from_breaks(breaks)
cat_breaks = cat_constructor(breaks)
result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
result = constructor(**result_kwargs)
tm.assert_index_equal(result, expected)
def test_generic_errors(self, constructor):
# filler input data to be used when supplying invalid kwargs
filler = self.get_kwargs_from_breaks(range(10))
# invalid closed
msg = "invalid option for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
constructor(closed='invalid', **filler)
# unsupported dtype
msg = 'dtype must be an IntervalDtype, got int64'
with tm.assert_raises_regex(TypeError, msg):
constructor(dtype='int64', **filler)
# invalid dtype
msg = 'data type "invalid" not understood'
with tm.assert_raises_regex(TypeError, msg):
constructor(dtype='invalid', **filler)
# no point in nesting periods in an IntervalIndex
periods = period_range('2000-01-01', periods=10)
periods_kwargs = self.get_kwargs_from_breaks(periods)
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
constructor(**periods_kwargs)
# decreasing values
decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
constructor(**decreasing_kwargs)
class TestFromArrays(Base):
"""Tests specific to IntervalIndex.from_arrays"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_arrays
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
        converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by IntervalIndex.from_arrays
"""
return {'left': breaks[:-1], 'right': breaks[1:]}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list('01234abcde'), ordered=True)
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_arrays(data[:-1], data[1:])
# unequal length
left = [0, 1, 2]
right = [2, 3]
msg = 'left and right must have the same length'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(left, right)
@pytest.mark.parametrize('left_subtype, right_subtype', [
(np.int64, np.float64), (np.float64, np.int64)])
def test_mixed_float_int(self, left_subtype, right_subtype):
"""mixed int/float left/right results in float for both sides"""
left = np.arange(9, dtype=left_subtype)
right = np.arange(1, 10, dtype=right_subtype)
result = IntervalIndex.from_arrays(left, right)
expected_left = Float64Index(left)
expected_right = Float64Index(right)
expected_subtype = np.float64
tm.assert_index_equal(result.left, expected_left)
tm.assert_index_equal(result.right, expected_right)
assert result.dtype.subtype == expected_subtype
class TestFromBreaks(Base):
"""Tests specific to IntervalIndex.from_breaks"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_breaks
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
        converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by IntervalIndex.from_breaks
"""
return {'breaks': breaks}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list('01234abcde'), ordered=True)
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_breaks(data)
def test_length_one(self):
"""breaks of length one produce an empty IntervalIndex"""
breaks = [0]
result = IntervalIndex.from_breaks(breaks)
expected = IntervalIndex.from_breaks([])
tm.assert_index_equal(result, expected)
class TestFromTuples(Base):
"""Tests specific to IntervalIndex.from_tuples"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_tuples
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
        converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by IntervalIndex.from_tuples
"""
if len(breaks) == 0:
return {'data': breaks}
tuples = lzip(breaks[:-1], breaks[1:])
if isinstance(breaks, (list, tuple)):
return {'data': tuples}
elif is_categorical_dtype(breaks):
return {'data': breaks._constructor(tuples)}
return {'data': com._asarray_tuplesafe(tuples)}
def test_constructor_errors(self):
# non-tuple
tuples = [(0, 1), 2, (3, 4)]
msg = 'IntervalIndex.from_tuples received an invalid item, 2'
with tm.assert_raises_regex(TypeError, msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
# too few/many items
tuples = [(0, 1), (2,), (3, 4)]
msg = 'IntervalIndex.from_tuples requires tuples of length 2, got {t}'
with tm.assert_raises_regex(ValueError, msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
tuples = [(0, 1), (2, 3, 4), (5, 6)]
with tm.assert_raises_regex(ValueError, msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
def test_na_tuples(self):
        # tuple (NA, NA) evaluates the same as NA as an element
na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
tm.assert_index_equal(idx_na_tuple, idx_na_element)
class TestClassConstructors(Base):
"""Tests specific to the IntervalIndex/Index constructors"""
@pytest.fixture(params=[IntervalIndex, partial(Index, dtype='interval')],
ids=['IntervalIndex', 'Index'])
def constructor(self, request):
return request.param
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
        converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by the IntervalIndex/Index constructors
"""
if len(breaks) == 0:
return {'data': breaks}
ivs = [Interval(l, r, closed) if notna(l) else l
for l, r in zip(breaks[:-1], breaks[1:])]
if isinstance(breaks, list):
return {'data': ivs}
elif is_categorical_dtype(breaks):
return {'data': breaks._constructor(ivs)}
return {'data': np.array(ivs, dtype=object)}
def test_generic_errors(self, constructor):
"""
override the base class implementation since errors are handled
differently; checks unnecessary since caught at the Interval level
"""
pass
def test_constructor_errors(self, constructor):
# mismatched closed inferred from intervals vs constructor.
ivs = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
constructor(ivs, closed='neither')
# mismatched closed within intervals
ivs = [Interval(0, 1, closed='right'), Interval(2, 3, closed='left')]
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
constructor(ivs)
# scalar
msg = (r'IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
constructor(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
constructor([0, 1])
class TestFromIntervals(TestClassConstructors):
"""
Tests for IntervalIndex.from_intervals, which is deprecated in favor of the
IntervalIndex constructor. Same tests as the IntervalIndex constructor,
plus deprecation test. Should only need to delete this class when removed.
"""
@pytest.fixture
def constructor(self):
def from_intervals_ignore_warnings(*args, **kwargs):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
return IntervalIndex.from_intervals(*args, **kwargs)
return from_intervals_ignore_warnings
def test_deprecated(self):
ivs = [Interval(0, 1), Interval(1, 2)]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
IntervalIndex.from_intervals(ivs)
| bsd-3-clause |
kadrlica/pointing | pointing/pointing.py | 1 | 29180 | #!/usr/bin/env python
""" Where you at? """
import sys,os
import logging
from collections import OrderedDict as odict
from datetime import datetime,timedelta,tzinfo
import dateutil.parser
import mpl_toolkits.basemap as basemap
from matplotlib.patches import Ellipse, Circle
import matplotlib.patheffects as patheffects
from _tkinter import TclError
import numpy as np
import pylab as plt
import ephem
__author__ = "Alex Drlica-Wagner"
__email__ = "[email protected]"
__version__ = "2.1.3"
MAXREF=5000 # Maximum number of refreshes
DECAM=1.1 # DECam radius (deg)
# Accurate DECam marker size depends on figsize and DPI
# This is a mess...
FIGSIZE=(10.5,8.5)
SCALE=np.sqrt((8.0*6.0)/(FIGSIZE[0]*FIGSIZE[1]))
DPI=80;
FILTERS = ['u','g','r','i','z','Y','VR']
BANDS = FILTERS + ['all']
COLORS = odict([
('none','black'),
('u','blue'),
('g','green'),
('r','red'),
('i','gold'),
('z','magenta'),
('Y','black'),
('VR','gray'),
])
# Allowed map projections
PROJ = odict([
('ortho' , dict(projection='ortho',celestial=True)),
('moll' , dict(projection='moll',celestial=True)),
('mol' , dict(projection='moll',celestial=True)),
('ait' , dict(projection='hammer',celestial=True)),
('mbt' , dict(projection='mbtfpq',celestial=True)),
('mbtfpq' , dict(projection='mbtfpq',celestial=True)),
('mcbryde', dict(projection='mbtfpq',celestial=True)),
])
# Derived from telra,teldec of 10000 exposures
SN = odict([
('E1',(7.874, -43.010)),
('E2',(9.500, -43.999)),
('X1',(34.476, -4.931)),
('X2',(35.664,-6.413)),
('X3',(36.449, -4.601)),
('S1',(42.818, 0.000)),
('S2',(41.193, -0.991)),
('C1',(54.274, -27.113)),
('C2',(54.274, -29.090)),
('C3',(52.647, -28.101)),
])
SN_LABELS = odict([
('SN-E',(8,-41)),
('SN-X',(35,-12)),
('SN-S',(45,1)),
('SN-C',(55,-35)),
])
# The allowed footprint outlines
FOOTPRINTS = ['none','des','des-sn','smash','maglites','bliss','decals','delve']
# CTIO location taken from:
#http://www.ctio.noao.edu/noao/content/Coordinates-Observatories-Cerro-Tololo-and-Cerro-Pachon
#http://arxiv.org/pdf/1210.1616v3.pdf
#(-30h 10m 10.73s, -70h 48m 23.52s, 2213m)
TEL_LON = -70.80653
TEL_LAT = -30.169647
TEL_HEIGHT = 2213
# Create the observatory object
CTIO = ephem.Observer()
CTIO.lon,CTIO.lat = str(TEL_LON),str(TEL_LAT)
CTIO.elevation = TEL_HEIGHT
def get_datadir():
""" Path to data directory. """
return os.path.join(os.path.dirname(os.path.realpath(__file__)),'data')
def setdefaults(kwargs,defaults):
""" set dictionary with defaults. """
for k,v in defaults.items():
kwargs.setdefault(k,v)
return kwargs
def gal2cel(glon, glat):
"""
Converts Galactic (deg) to Celestial J2000 (deg) coordinates
"""
glat = np.radians(glat)
sin_glat = np.sin(glat)
cos_glat = np.cos(glat)
glon = np.radians(glon)
ra_gp = np.radians(192.85948)
de_gp = np.radians(27.12825)
lcp = np.radians(122.932)
sin_lcp_glon = np.sin(lcp - glon)
cos_lcp_glon = np.cos(lcp - glon)
sin_d = (np.sin(de_gp) * sin_glat) \
+ (np.cos(de_gp) * cos_glat * cos_lcp_glon)
ramragp = np.arctan2(cos_glat * sin_lcp_glon,
(np.cos(de_gp) * sin_glat) \
- (np.sin(de_gp) * cos_glat * cos_lcp_glon))
dec = np.arcsin(sin_d)
ra = (ramragp + ra_gp + (2. * np.pi)) % (2. * np.pi)
return np.degrees(ra), np.degrees(dec)
def cel2gal(ra, dec):
"""
    Converts Celestial J2000 (deg) to Galactic (deg) coordinates
"""
dec = np.radians(dec)
sin_dec = np.sin(dec)
cos_dec = np.cos(dec)
ra = np.radians(ra)
ra_gp = np.radians(192.85948)
de_gp = np.radians(27.12825)
sin_ra_gp = np.sin(ra - ra_gp)
cos_ra_gp = np.cos(ra - ra_gp)
lcp = np.radians(122.932)
sin_b = (np.sin(de_gp) * sin_dec) \
+ (np.cos(de_gp) * cos_dec * cos_ra_gp)
lcpml = np.arctan2(cos_dec * sin_ra_gp,
(np.cos(de_gp) * sin_dec) \
- (np.sin(de_gp) * cos_dec * cos_ra_gp))
glat = np.arcsin(sin_b)
glon = (lcp - lcpml + (2. * np.pi)) % (2. * np.pi)
return np.degrees(glon), np.degrees(glat)
# Stupid timezone definition
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def safe_proj(bmap,lon,lat,inverse=False):
""" Remove points outside of projection
Parameters:
-----------
bmap : basemap
lon : longitude
lat : latitude
inverse : inverse projection
Returns:
--------
x,y : projected coordinates
"""
x,y = bmap(np.atleast_1d(lon),np.atleast_1d(lat),inverse=inverse)
x[np.abs(x) > 1e29] = None
y[np.abs(y) > 1e29] = None
return x,y
def get_boundary(bmap,projection,fact=0.99):
# Check that point inside boundary
# Doesn't work for 'ait' and 'moll' projections
if projection in basemap._pseudocyl:
# This was estimated by eye...
rminor=9.00995e6; rmajor = 2*rminor
boundary = Ellipse((rmajor,rminor),
2*(fact*rmajor),2*(fact*rminor))
else:
boundary = Ellipse((bmap.rmajor,bmap.rminor),
2*(fact*bmap.rmajor),2*(fact*bmap.rminor))
return boundary
def airmass_angle(x=1.4):
""" Zenith angle for a given airmass limit """
return 90.-np.degrees(np.arcsin(1./x))
def load_data(opts):
""" Load the data (either from DB of file).
Parameters:
-----------
opts : command line options
Returns:
--------
data : numpy recarray
"""
since = parse_since(opts.since)
propid = '%' if opts.propid is None else opts.propid
dtype=[('expnum',int),('telra',float),('teldec',float),('filter',object)]
if opts.infile is None:
selection = ['id','telra','teldec','filter']
#filter = "exposed = TRUE AND flavor LIKE '%s' AND date > '%s' AND propid LIKE '%s' ORDER BY id DESC"%(opts.flavor,since.isoformat(),propid)
filter = "exposed = TRUE AND flavor SIMILAR TO '%s' AND date > '%s' AND propid LIKE '%s' ORDER BY id DESC"%(opts.flavor,since.isoformat(),propid)
# Use the FNAL mirror to avoid overloading CTIO
try: from database import Database
except ImportError: from pointing.database import Database
db = Database(dbname='db-'+opts.db)
db.connect()
query = "SELECT %s FROM exposure WHERE %s"%(','.join(selection),filter)
#query = "SELECT id as expnum,telra as ra,teldec as dec,filter as band FROM exposure WHERE exposed = TRUE AND flavor LIKE 'object' and telra between 80 and 82 AND teldec between -71 and -69"
data = db.execute(query)
if len(data): ret = np.rec.array(data,dtype=dtype)
else: ret = np.rec.recarray(0,dtype=dtype)
return ret
else:
return np.loadtxt(opts.infile,dtype=dtype)
def mjd(datetime):
""" Modified Julian Date (MJD) """
mjd_epoch = dateutil.parser.parse('1858-11-17T00:00:00Z')
mjd_date = (datetime-mjd_epoch).total_seconds()/float(24*60*60)
return mjd_date
def lmst(observatory):
""" Calculate Local Mean Sidereal Time (LMST) """
lmst = np.degrees(observatory.sidereal_time())
logging.debug('Using pyephem for LMST: %.3f'%lmst)
return lmst
def moon(datetime):
""" Moon location
Parameters:
-----------
datetime : the datetime of moon location request
Returns:
--------
(ra, dec), phase : moon parameters [(deg, deg), %]
"""
moon = ephem.Moon()
moon.compute(CTIO)
moon_phase = moon.moon_phase * 100
moon_ra,moon_dec = np.degrees([moon.ra,moon.dec])
return (moon_ra, moon_dec),moon_phase
def boolean(string):
""" Convert strings to booleans for argparse """
string = string.lower()
if string in ['0', 'f', 'false', 'no', 'off']:
return False
elif string in ['1', 't', 'true', 'yes', 'on']:
return True
else:
raise ValueError()
def splash_screen():
""" Splash text to print """
splash = """Running Alex Drlica-Wagner's DECam pointing script..."""
logging.info(splash)
def parse_utc(value):
""" Parse isoformat 'utc' option string. """
if value is None:
utc = datetime.now(tz=UTC())
elif isinstance(value,datetime):
utc = value
else:
utc = dateutil.parser.parse(value,tzinfos={'UTC':UTC})
logging.debug("UTC: %s"%utc.strftime('%Y-%m-%d %H:%M:%S'))
return utc
def parse_since(value):
""" Parse isoformat 'since' option string. """
if value is None:
since = datetime.now(tz=UTC()) - timedelta(hours=12)
elif isinstance(value,datetime):
since = value
elif value.lower() in ['all','none','forever']:
since = dateutil.parser.parse('2012-01-01 12:00',tzinfos={'UTC':UTC})
else:
since = dateutil.parser.parse(value,tzinfos={'UTC':UTC})
logging.debug("Since: %s"%since.strftime('%Y-%m-%d %H:%M:%S'))
return since
def draw_constellation(bmap,name):
""" Draw a map of the constellations (work in progress). """
from constellations import CONSTELLATIONS
points = np.array(CONSTELLATIONS[name])
drawtype = points[:,0]
radeg = points[:,1] * 1.0 / 1800 * 15
decdeg = points[:,2] * 1.0 / 60
print(radeg,decdeg)
    x, y = safe_proj(bmap, radeg, decdeg)
    verts = list(zip(x, y))
    # XEPHEM2PATH (a mapping from XEphem draw codes to matplotlib Path codes)
    # is not defined or imported here yet; this helper is still a stub.
    codes = [XEPHEM2PATH[c] for c in drawtype]
    print(verts, codes)
def draw_milky_way(bmap,width=10,**kwargs):
""" Draw the Milky Way galaxy. """
defaults = dict(color='k',lw=1.5,ls='-')
setdefaults(kwargs,defaults)
logging.debug("Plotting the Milky Way")
glon = np.linspace(0,360,500)
glat = np.zeros_like(glon)
ra,dec = gal2cel(glon,glat)
ra -= 360*(ra > 180)
proj = safe_proj(bmap,ra,dec)
bmap.plot(*proj,**kwargs)
if width:
kwargs.update(dict(ls='--',lw=1))
for delta in [+width,-width]:
ra,dec = gal2cel(glon,glat+delta)
proj = safe_proj(bmap,ra,dec)
bmap.plot(*proj,**kwargs)
def draw_des(bmap,**kwargs):
"""
Plot the DES wide-field footprint.
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
# Plot the wide-field survey footprint
logging.debug("Plotting footprint: %s"%opts.footprint)
#basedir = os.path.dirname(os.path.abspath(__file__))
infile = os.path.join(get_datadir(),'des-round19-poly.txt')
perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float)])
proj = safe_proj(bmap,perim['ra'],perim['dec'])
bmap.plot(*proj,**kwargs)
def draw_des_sn(bmap,**kwargs):
"""
Plot the DES supernova fields.
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
# Plot the SN fields
logging.debug("Plotting DES supernova fields.")
boundary = get_boundary(bmap,kwargs.pop('projection',None),fact=0.99)
for v in SN.values():
if not boundary.contains_point(bmap(*v)):
continue
# This does the projection correctly, but fails at boundary
bmap.tissot(v[0],v[1],DECAM,100,**kwargs)
# The SN labels
sntxt_kwargs = dict(zorder=kwargs['zorder'],fontsize=12,
bbox=dict(boxstyle='round,pad=0',fc='w',ec='none',
alpha=0.25))
for k,v in SN_LABELS.items():
plt.gca().annotate(k,bmap(*v),**sntxt_kwargs)
def draw_smash(bmap,**kwargs):
""" Draw the SMASH fields
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
filename = os.path.join(get_datadir(),'smash_fields_final.txt')
smash=np.genfromtxt(filename,dtype=[('ra',float),('dec',float)],usecols=[4,5])
smash_x,smash_y = safe_proj(bmap,smash['ra'],smash['dec'])
kwargs.update(dict(facecolor='none'))
bmap.scatter(smash_x,smash_y,color='k',**kwargs)
def draw_maglites(bmap,**kwargs):
"""
Plot the MagLiteS Phase-I footprint.
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
# Plot the wide-field survey footprint
logging.debug("Plotting MagLiteS footprint")
infile = os.path.join(get_datadir(),'maglites-poly.txt')
perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float)])
proj = safe_proj(bmap,perim['ra'],perim['dec'])
bmap.plot(*proj,**kwargs)
def draw_maglites2(bmap,**kwargs):
"""
Plot the MagLiteS Phase-II footprint.
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
# Plot the wide-field survey footprint
logging.debug("Plotting footprint: %s"%opts.footprint)
infile = os.path.join(get_datadir(),'maglitesII-poly.txt')
perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float),('poly',int)])
for p in np.unique(perim['poly']):
sel = (perim['poly'] == p)
proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
bmap.plot(*proj,**kwargs)
def draw_bliss(bmap,**kwargs):
"""
Plot the BLISS wide-field footprint.
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
# Plot the wide-field survey footprint
logging.debug("Plotting footprint: %s"%opts.footprint)
infile = os.path.join(get_datadir(),'bliss-poly.txt')
perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float),('poly',int)])
for p in np.unique(perim['poly']):
sel = (perim['poly'] == p)
proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
bmap.plot(*proj,**kwargs)
def draw_decals(bmap,**kwargs):
"""
Plot the DECaLS wide-field footprint.
Parameters:
-----------
bmap : The basemap object
kwargs : Various plotting arguments
Returns:
--------
None
"""
# Plot the wide-field survey footprint
logging.debug("Plotting footprint: %s"%opts.footprint)
infile = os.path.join(get_datadir(),'decals-poly.txt')
perim = np.loadtxt(infile,dtype=[('ra',float),('dec',float),('poly',int)])
for p in np.unique(perim['poly']):
sel = (perim['poly'] == p)
proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
bmap.plot(*proj,**kwargs)
def draw_delve(bmap,**kwargs):
""" Draw DELVE footprint """
defaults=dict(color='red', lw=2)
setdefaults(kwargs,defaults)
logging.debug("Plotting footprint: %s"%opts.footprint)
deep = odict([
('SextansB', (150.00, 5.33, 3.0)),
('IC5152', (330.67, -51.30, 3.0)),
('NGC300', ( 13.72, -37.68, 3.0)),
('NGC55', ( 3.79, -39.22, 3.0)),
])
boundary = get_boundary(bmap,kwargs.pop('projection',None),fact=0.98)
for ra,dec,radius in deep.values():
if not boundary.contains_point(bmap(ra,dec)): continue
# This does the projection correctly, but fails at boundary
bmap.tissot(ra,dec,radius,100,fc='none',edgecolor=kwargs['color'],lw=kwargs['lw'])
#for ra,dec,radius in deep.values():
# # This doesn't deal with boundaries well
# #self.tissot(ra, dec, radius, 100, fc='none',**kwargs)
# x,y = safe_proj(bmap,np.array([ra]), np.array([dec]))
# bmap.scatter(x,y,facecolor='none',edgecolor=kwargs['color'],lw=2,s=400)
filename = os.path.join(get_datadir(),'delve-poly.txt')
perim = np.loadtxt(filename,dtype=[('ra',float),('dec',float),('poly',int)])
for p in np.unique(perim['poly']):
sel = (perim['poly'] == p)
proj = safe_proj(bmap,perim[sel]['ra'],perim[sel]['dec'])
bmap.plot(*proj,**kwargs)
def plot(opts):
"""
Core plotting function. Creates the basemap, overplots all of the
requested features, and returns the map object.
Parameters:
-----------
opts : command line options
Returns:
--------
m : the basemap object
"""
utc = parse_utc(opts.utc)
CTIO.date = utc
since = parse_since(opts.since)
# Grab the data
data = load_data(opts)
# Subselect the data
sel = np.in1d(data['filter'],FILTERS)
if opts.band in FILTERS:
sel &= (data['filter'] == opts.band)
data = data[sel]
expnum,telra,teldec,band = data['expnum'],data['telra'],data['teldec'],data['filter']
# Set the colors
if opts.color:
nexp = len(expnum)
ncolors = len(COLORS)
color_repeat = np.repeat(COLORS.keys(),nexp).reshape(ncolors,nexp)
color_idx = np.argmax(band==color_repeat,axis=0)
color = np.array(COLORS.values())[color_idx]
else:
color = COLORS['none']
# Select the exposure of interest
if opts.expnum:
match = np.char.array(expnum).endswith(str(opts.expnum))
if not match.any():
msg = "Exposure matching %s not found"%opts.expnum
raise ValueError(msg)
idx = np.nonzero(match)[0][0]
elif len(data)==0:
idx = slice(None)
else:
idx = 0
# Create the figure
if plt.get_fignums():
fig,ax = plt.gcf(),plt.gca()
else:
fig,ax = plt.subplots(figsize=FIGSIZE,dpi=DPI)
fig.canvas.set_window_title("DECam Pointings")
#fig,ax = plt.subplots()
# Zenith position
lon_zen=lmst(CTIO); lat_zen = TEL_LAT
# Create the Basemap
proj_kwargs = PROJ[opts.proj]
# Centering position
if proj_kwargs['projection'] in basemap._pseudocyl:
### This should work, but doesn't.
### Compare lon_0=-80.58345277606 to lon_0=-80.6 or lon_0=-80.5
#lon_0=lon_zen-360*(lon_zen>180),lat_zen=0
lon_0,lat_0 = 0,0
else:
lon_0,lat_0 = -lon_zen, lat_zen # Center position
proj_kwargs.update(lon_0=lon_0,lat_0=lat_0)
bmap = basemap.Basemap(**proj_kwargs)
def format_coord(x,y):
#Format matplotlib cursor to display RA, Dec
lon,lat = safe_proj(bmap,x,y,inverse=True)
lon += 360*(lon < 0)
return 'ra=%1.3f, dec=%1.3f'%(lon,lat)
plt.gca().format_coord = format_coord
parallels = np.arange(-90.,120.,30.)
bmap.drawparallels(parallels)
meridians = np.arange(0.,420.,60.)
bmap.drawmeridians(meridians)
for mer in meridians[:-1]:
plt.annotate(r'$%i^{\circ}$'%mer,bmap(mer,5),ha='center')
plt.annotate('West',xy=(1.0,0.5),ha='left',xycoords='axes fraction')
plt.annotate('East',xy=(0.0,0.5),ha='right',xycoords='axes fraction')
# markersize defined at minimum distortion point
if proj_kwargs['projection'] in basemap._pseudocyl:
x1,y1=ax.transData.transform(bmap(lon_0,lat_0+DECAM))
x2,y2=ax.transData.transform(bmap(lon_0,lat_0-DECAM))
else:
x1,y1=ax.transData.transform(bmap(lon_zen,lat_zen+DECAM))
x2,y2=ax.transData.transform(bmap(lon_zen,lat_zen-DECAM))
# Since markersize defined in "points" in scales with figsize/dpi
size = SCALE * (y1-y2)**2
# Scale the marker size to the size of an exposure
exp_zorder = 10
exp_kwargs = dict(s=size,marker='H',zorder=exp_zorder,edgecolor='k',lw=1)
# Projected exposure locations
x,y = safe_proj(bmap,telra,teldec)
# Plot exposure of interest
if len(data):
logging.debug("Plotting exposure: %i (%3.2f,%3.2f)"%(expnum[idx],telra[idx],teldec[idx]))
# Hacked path effect (fix if matplotlib is updated)
bmap.scatter(x[idx],y[idx],color='w',**dict(exp_kwargs,edgecolor='w',s=70,lw=2))
bmap.scatter(x[idx],y[idx],color=color,**dict(exp_kwargs,alpha=1.0,linewidth=2))
# Once matplotlib is updated
#x = bmap.scatter(x[idx],y[idx],color=color,**exp_kwargs)
#ef = patheffects.withStroke(foreground="w", linewidth=3)
#x.set_path_effects([ef])
# Plot previous exposures
nexp_kwargs = dict(exp_kwargs)
nexp_kwargs.update(zorder=exp_zorder-1,alpha=0.2,edgecolor='none')#,lw=0)
exp_slice = slice(None,opts.numexp)
numexp = len(x[exp_slice])
logging.debug("Plotting last %s exposures"%(numexp))
bmap.scatter(x[exp_slice],y[exp_slice],color=color[exp_slice],**nexp_kwargs)
# Plot zenith position & focal plane scale
zen_x,zen_y = bmap(lon_zen,lat_zen)
#zen_kwargs = dict(color='green',alpha=0.75,lw=1,zorder=0)
zen_kwargs = dict(color='green',alpha=0.75,lw=1,zorder=1000)
if opts.zenith:
logging.debug("Plotting zenith: (%.2f,%.2f)"%(lon_zen,lat_zen))
bmap.plot(zen_x,zen_y,'+',ms=10,**zen_kwargs)
logging.debug("Plotting focal plane scale.")
bmap.tissot(lon_zen, lat_zen, DECAM, 100, fc='none', **zen_kwargs)
# To test exposure size
#bmap.tissot(lon_zen, lat_zen, DECAM, 100, fc='none', **zen_kwargs)
#bmap.scatter(*bmap(lon_zen,lat_zen),**nexp_kwargs)
#bmap.tissot(0, 0, DECAM, 100, fc='none', **zen_kwargs)
#bmap.scatter(*bmap(0,0),**nexp_kwargs)
# Plot airmass circle
if opts.airmass < 1:
logging.warning("Airmass must be greater than one.")
opts.airmass = np.nan
else:
logging.debug("Plotting airmass: %s"%opts.airmass)
angle = airmass_angle(opts.airmass)
bmap.tissot(lon_zen, lat_zen, angle, 100, fc='none',**zen_kwargs)
# Moon location and phase
(moon_ra,moon_dec),moon_phase = moon(utc)
if opts.moon:
logging.debug("Plotting moon: %i%%,(%.1f,%.1f)"%(moon_phase,moon_ra,moon_dec))
moon_txt = '%i%%'%moon_phase
#bbox = dict(boxstyle='circle,pad=0.4',fc='k',ec='k',alpha=0.25,lw=2)
moon_kwargs = dict(zorder=exp_zorder-1,fontsize=11,va='center',ha='center',weight='bold')
ax.annotate(moon_txt,bmap(moon_ra,moon_dec),**moon_kwargs)
# Again old matplotlib making things difficult
moon_kwargs2 = dict(facecolor='k',alpha=0.25,lw=2,s=2000)
ax.scatter(*bmap(moon_ra,moon_dec),**moon_kwargs2)
if opts.mw:
mw_kwargs = dict(color='k')
draw_milky_way(bmap,**mw_kwargs)
# Plot footprint(s)
fp_zorder=exp_zorder-1
fp_kwargs=dict(marker='o',mew=0,mfc='none',color='k',lw=2,zorder=fp_zorder)
if 'none' in opts.footprint:
opts.footprint = ['none']
if 'des' in opts.footprint:
des_kwargs = dict(fp_kwargs,color='b')
draw_des(bmap,**des_kwargs)
if 'des' in opts.footprint or 'des-sn' in opts.footprint:
sn_kwargs = dict(facecolor='none',edgecolor='b',projection=proj_kwargs['projection'],zorder=fp_zorder)
draw_des_sn(bmap,**sn_kwargs)
if 'smash' in opts.footprint:
smash_kwargs = dict(facecolor='none',**exp_kwargs)
smash_kwargs.update(zorder=exp_zorder+1)
draw_smash(bmap,**smash_kwargs)
if 'maglites' in opts.footprint:
maglites_kwargs = dict(fp_kwargs,color='r')
draw_maglites(bmap,**maglites_kwargs)
draw_maglites2(bmap,**maglites_kwargs)
if 'bliss' in opts.footprint:
bliss_kwargs = dict(fp_kwargs,color='r')
draw_bliss(bmap,**bliss_kwargs)
if 'decals' in opts.footprint:
decals_kwargs = dict(fp_kwargs,color='m')
draw_decals(bmap,**decals_kwargs)
if 'delve' in opts.footprint:
delve_kwargs = dict(fp_kwargs,color='r')
draw_delve(bmap,**delve_kwargs)
# Annotate with some information
if opts.legend:
logging.debug("Adding info text.")
bbox_props = dict(boxstyle='round', facecolor='white')
textstr= "%s %s\n"%("UTC:",utc.strftime('%Y-%m-%d %H:%M:%S'))
if len(data):
textstr+="%s %i (%s)\n"%("Exposure:",expnum[idx],band[idx])
textstr+="%s %i\n"%("Num. Exp.:",numexp)
textstr+="%s (%.1f$^{\circ}$, %.1f$^{\circ}$)\n"%("Zenith:",lon_zen,lat_zen)
textstr+="%s %s\n"%("Airmass:",np.nan_to_num(opts.airmass))
textstr+="%s %i%% (%.1f$^{\circ}$, %.1f$^{\circ}$)\n"%("Moon:",moon_phase,moon_ra,moon_dec)
textstr+="%s %s"%("Footprint:",', '.join(opts.footprint))
ax.annotate(textstr, xy=(0.90,1.05), xycoords='axes fraction',
fontsize=10,ha='left',va='top', bbox=bbox_props)
# Plot filter legend
if opts.color:
logging.debug("Adding filter legend.")
leg_kwargs = dict(scatterpoints=1,fontsize=10,bbox_to_anchor=(0.08,0.20))
handles, labels = [],[]
for k in FILTERS:
if k == 'VR' and not (band=='VR').any(): continue
labels.append(k)
handles.append(plt.scatter(None,None,color=COLORS[k],**exp_kwargs))
plt.legend(handles,labels,**leg_kwargs)
# Plot the version number
vers_kwargs = dict(xy=(0.985,0.015),ha='right',va='bottom',
xycoords='figure fraction',size=8)
plt.annotate('pointing v.%s'%__version__,**vers_kwargs)
# Plot the author's name
auth_kwargs = dict(xy=(0.015,0.015),ha='left',va='bottom',
xycoords='figure fraction',size=8)
plt.annotate(u'\u00a9'+' %s'%__author__,**auth_kwargs)
return bmap
if __name__ == "__main__":
import argparse
description = __doc__
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('expnum',nargs='?',type=int,default=None,
help="exposure number to plot")
parser.add_argument('-a','--airmass',default=1.4,type=float,
help='draw airmass limit')
parser.add_argument('-b','--band',default='all',choices=BANDS,
help='draw exposures in specific band')
parser.add_argument('-c','--color',default=True,type=boolean,
help='color corresponding to filter')
parser.add_argument('--db',default='fnal',choices=['ctio','fnal'],
help='database to query for exposures')
parser.add_argument('-f','--footprint',action='append',choices=FOOTPRINTS,
help='footprint to draw')
parser.add_argument('--flavor',default='object|standard',type=str,
help='exposure type [object,zero,dome flat,etc.]')
parser.add_argument('-i','--infile',default=None,
help='list of exposures to draw')
parser.add_argument('--legend',default=True,type=boolean,
help='draw figure legend')
parser.add_argument('-m','--moon',default=True,type=boolean,
help='draw moon location and phase')
parser.add_argument('--mw',action='store_true',
help='draw the Milky Way plane')
parser.add_argument('-n','--numexp',default=None,type=int,
help='number of most recent exposures to plot')
parser.add_argument('-o','--outfile',default=None,
help='output file for saving figure')
parser.add_argument('--propid',default=None,
help='draw exposures from specific propid')
parser.add_argument('--proj',default='ortho',choices=PROJ.keys(),
help='projection for plot')
parser.add_argument('--refresh',nargs='?',default=None,const=60,type=int,
help="refresh interval for figure (seconds).")
parser.add_argument('--since',default=None,
help="UTC for first exposure (defaults to 12 hours)")
parser.add_argument('--utc',default=None,
help="UTC for zenith position (defaults to 'now')")
parser.add_argument('-v','--verbose',action='store_true',
help='output verbosity')
parser.add_argument('--version',action='version',
version='%(prog)s '+__version__)
parser.add_argument('-z','--zenith',default=True,type=boolean,
help="draw zenith position")
opts = parser.parse_args()
# Set logging level
logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.INFO,
format='%(message)s',stream=sys.stdout)
if not opts.footprint: opts.footprint = ['des']
# Do the plotting
m = plot(opts)
# In interactive session
if sys.flags.interactive: plt.ion()
if opts.outfile:
# Save the figure
logging.debug("Saving figure to: %s"%opts.outfile)
plt.savefig(opts.outfile,dpi=250)
elif not opts.refresh:
# Show plot
plt.show()
else:
# Refresh the plot
plt.show(block=False)
for i in range(MAXREF): # safer than while loop
try:
plt.pause(opts.refresh)
except TclError:
# Catch the TclError thrown when window closed
break
logging.debug("Refreshing plot...")
plt.cla()
m = plot(opts)
        if i == MAXREF - 1:
logging.info("Reached max refresh number.")
| mit |
mit-crpg/openmc | examples/pincell_depletion/run_depletion.py | 8 | 5021 | from math import pi
import openmc
import openmc.deplete
import matplotlib.pyplot as plt
###############################################################################
# Define materials
###############################################################################
# Instantiate some Materials and register the appropriate Nuclides
uo2 = openmc.Material(name='UO2 fuel at 2.4% wt enrichment')
uo2.set_density('g/cm3', 10.29769)
uo2.add_element('U', 1., enrichment=2.4)
uo2.add_element('O', 2.)
helium = openmc.Material(name='Helium for gap')
helium.set_density('g/cm3', 0.001598)
helium.add_element('He', 2.4044e-4)
zircaloy = openmc.Material(name='Zircaloy 4')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_element('Sn', 0.014, 'wo')
zircaloy.add_element('Fe', 0.00165, 'wo')
zircaloy.add_element('Cr', 0.001, 'wo')
zircaloy.add_element('Zr', 0.98335, 'wo')
borated_water = openmc.Material(name='Borated water')
borated_water.set_density('g/cm3', 0.740582)
borated_water.add_element('B', 4.0e-5)
borated_water.add_element('H', 5.0e-2)
borated_water.add_element('O', 2.4e-2)
borated_water.add_s_alpha_beta('c_H_in_H2O')
###############################################################################
# Create geometry
###############################################################################
# Define surfaces
pitch = 1.25984
fuel_or = openmc.ZCylinder(r=0.39218, name='Fuel OR')
clad_ir = openmc.ZCylinder(r=0.40005, name='Clad IR')
clad_or = openmc.ZCylinder(r=0.45720, name='Clad OR')
box = openmc.model.rectangular_prism(pitch, pitch, boundary_type='reflective')
# Define cells
fuel = openmc.Cell(fill=uo2, region=-fuel_or)
gap = openmc.Cell(fill=helium, region=+fuel_or & -clad_ir)
clad = openmc.Cell(fill=zircaloy, region=+clad_ir & -clad_or)
water = openmc.Cell(fill=borated_water, region=+clad_or & box)
# Define overall geometry
geometry = openmc.Geometry([fuel, gap, clad, water])
###############################################################################
# Set volumes of depletable materials
###############################################################################
# Set material volume for depletion. For 2D simulations, this should be an area.
uo2.volume = pi * fuel_or.r**2
###############################################################################
# Transport calculation settings
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 10
settings.particles = 1000
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings.source = openmc.source.Source(space=uniform_dist)
entropy_mesh = openmc.RegularMesh()
entropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50]
entropy_mesh.upper_right = [0.39218, 0.39218, 1.e50]
entropy_mesh.dimension = [10, 10, 1]
settings.entropy_mesh = entropy_mesh
###############################################################################
# Initialize and run depletion calculation
###############################################################################
# Create depletion "operator"
chain_file = './chain_simple.xml'
op = openmc.deplete.Operator(geometry, settings, chain_file)
# Perform simulation using the predictor algorithm
time_steps = [1.0, 1.0, 1.0, 1.0, 1.0] # days
power = 174 # W/cm, for 2D simulations only (use W for 3D)
integrator = openmc.deplete.PredictorIntegrator(op, time_steps, power, timestep_units='d')
integrator.integrate()
###############################################################################
# Read depletion calculation results
###############################################################################
# Open results file
results = openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")
# Obtain K_eff as a function of time
time, keff = results.get_eigenvalue()
# Obtain U235 concentration as a function of time
time, n_U235 = results.get_atoms('1', 'U235')
# Obtain Xe135 capture reaction rate as a function of time
time, Xe_capture = results.get_reaction_rate('1', 'Xe135', '(n,gamma)')
###############################################################################
# Generate plots
###############################################################################
days = 24*60*60
plt.figure()
plt.plot(time/days, keff, label="K-effective")
plt.xlabel("Time (days)")
plt.ylabel("Keff")
plt.show()
plt.figure()
plt.plot(time/days, n_U235, label="U235")
plt.xlabel("Time (days)")
plt.ylabel("n U5 (-)")
plt.show()
plt.figure()
plt.plot(time/days, Xe_capture, label="Xe135 capture")
plt.xlabel("Time (days)")
plt.ylabel("RR (-)")
plt.show()
plt.close('all')
| mit |
MicheleDamian/ConnectopicMapping | connectopic_mapping/utils.py | 1 | 17291 | """Utility methods to manipulate and visualize voxels."""
import numpy
import nibabel
from matplotlib import pyplot
from mpl_toolkits.axes_grid1 import Grid
from nilearn import datasets, image
def load_masks(dataset, roi_label, hemisphere, remove_white_matter=True):
"""Load brain and region of interest (ROI) masks.
Construct brain and ROI masks from a labeled Harvard-Oxford's `dataset`
(e.g. 'cort-maxprob-thr25-2mm').
Parameters
----------
dataset : string
Name of the labeled dataset from the Harvard-Oxford's atlas
from where to extract the region of interest. The voxels must
have a resolution of 2mm.
roi_label : string
Name of the label from the dataset corresponding to the region
of interest.
hemisphere : string, (options: 'LH', 'RH', 'both')
Hemisphere on which the region of interest is located
(left 'LH', right 'RH' or 'both')
remove_white_matter : bool, (default: True)
Remove white matter voxels from masks.
Returns
-------
brain_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
Boolean matrix where each cell is True if the voxel is part
of the brain, False otherwise.
roi_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
Boolean matrix where each cell is True if the voxel is part
of the region of interest, False otherwise.
Examples
--------
This is the Harvard-Oxford dataset:
>>> import numpy, nilearn, nibabel
>>> cortex_dataset = nilearn.datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
>>> cortex_labels = numpy.array(cortex_dataset.labels)
>>> 'Precentral Gyrus' in cortex_dataset.labels
True
>>> cortex_data = nibabel.load(cortex_dataset.maps).get_data()
>>> cortex_data.shape
(91, 109, 91)
This is how to load the masks:
>>> brain_mask, roi_mask = utils.load_masks('cort-maxprob-thr25-2mm', 'Precentral Gyrus', 'both', False)
>>> brain_mask.shape
(91, 109, 91)
>>> roi_mask.shape
(91, 109, 91)
And these are the definitions of the masks
(with `remove_white_matter` == False):
>>> idx_background_label = numpy.where((cortex_labels == 'Background'))[0][0]
>>> numpy.all(brain_mask == (cortex_data != idx_background_label))
True
>>> idx_pregyrus_label = numpy.where((cortex_labels == 'Precentral Gyrus'))[0][0]
>>> numpy.all(roi_mask == (cortex_data == idx_pregyrus_label))
True
"""
# Load region
cortex_dataset = datasets.fetch_atlas_harvard_oxford(dataset)
cortex_labels = numpy.array(cortex_dataset.labels)
cortex_maps = nibabel.load(cortex_dataset.maps)
cortex_data = cortex_maps.get_data()
roi_indexes = numpy.where((cortex_labels == roi_label))[0]
# Build mask from ROI
roi_mask = numpy.zeros(cortex_data.shape, dtype=bool)
for index in roi_indexes:
roi_mask[cortex_data == index] = True
# Keep just one hemisphere
roi_mask_width = roi_mask.shape[0]
roi_mask_half_width = int(roi_mask_width/2)
if hemisphere == 'LH':
roi_mask[roi_mask_half_width:, :, :] = False
elif hemisphere == 'RH':
roi_mask[:roi_mask_half_width:, :, :] = False
# Load brain mask
brain_mask = numpy.zeros(cortex_data.shape, dtype=bool)
brain_mask[numpy.nonzero(cortex_data)] = True
# Load white matter mask
if remove_white_matter:
subcortex_dataset = datasets.fetch_atlas_harvard_oxford('sub-maxprob-thr25-2mm')
subcortex_labels = numpy.array(subcortex_dataset.labels)
subcortex_maps = nibabel.load(subcortex_dataset.maps)
subcortex_data = subcortex_maps.get_data()
white_indexes = numpy.where((subcortex_labels == "Left Cerebral White Matter") +
(subcortex_labels == "Right Cerebral White Matter"))[0]
white_mask = numpy.zeros(subcortex_data.shape, dtype=bool)
for index in white_indexes:
white_mask[subcortex_data == index] = True
# Remove white matter from masks
brain_mask = brain_mask * ~white_mask
roi_mask = roi_mask * ~white_mask
return brain_mask, roi_mask
def visualize_volume(data, brain_mask, roi_mask,
slice_indexes,
low_percentile=10, high_percentile=90,
num_fig=0,
title="Projection", margin=1,
legend_location=1, cmap='gist_rainbow'):
"""Visualize the projections of the brain onto the XY, XZ and YZ planes.
Parameters
----------
data : numpy.ndarray, shape (num_voxels, )
1-dimensional data containing the voxels' values inside the ROI
as defined by `roi_mask`.
brain_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
Boolean 3-dimensional numpy ndarray where voxels marked as True
are part of the brain.
roi_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
Boolean 3-dimensional numpy ndarray where voxels marked as True
are part of the same region of interest.
low_percentile : int, (default: 10)
Lower percentile at which to start normalizing `data`. Lower values
are encoded as `low_percentile`. Consider increasing (decreasing)
it if the distribution of `data` has a long (short) left tail.
high_percentile : int, (default: 90)
Higher percentile at which to stop normalizing `data`. Higher values
are encoded as `high_percentile`. Consider decreasing (increasing)
        it if the distribution of `data` has a long (short) right tail.
slice_indexes : numpy.ndarray, shape (3, )
List of int representing the X, Y and Z indexes where to cut the
YZ, XZ and XY planes respectively.
    num_fig : matplotlib.figure.Figure
        Figure where to plot the graphs; the colorbar axes are added to it
        via `add_axes`.
title : str, (default: "Projection")
Title for the three graphs to visualize. It is followed by the
plane the graph corresponds to and the coordinates of the plane.
margin : int, (default: 1)
Number of voxels between the ROI and the axes in the visualization.
legend_location : int, (default: 1)
Location of the legend in the graph. 1 := North-East, 2 :=
North-West, 3 := South-West and 4 := South-East. If None the
legend is not visualized.
cmap : str, (default: 'gist_rainbow')
String that identifies a matplotlib.colors.Colormap instance.
Examples
--------
>>> slice_indexes = [18, 65, 50]
>>> visualize_volume(data, brain_mask, roi_mask, slice_indexes)
"""
slice_config = [('X-Z', 'Y', slice_indexes[1], 1),
('Y-Z', 'X', slice_indexes[0], 0),
('X-Y', 'Z', slice_indexes[2], 2)]
# Get voxels color from embedding
min_val = numpy.percentile(data, low_percentile)
max_val = numpy.percentile(data, high_percentile)
data_norm = (data - min_val) / (max_val - min_val)
fun_cmap = pyplot.get_cmap(cmap)
clr_rgb = fun_cmap(data_norm.flatten())
def get_slice_coords(mask, plane):
coords_mask = numpy.where(mask)
coords_idx = numpy.where(coords_mask[plane[3]] == plane[2])[0]
return coords_mask, coords_idx
#
# Display embedding
#
axes = Grid(num_fig, rect=111, nrows_ncols=(2, 2), label_mode='L')
# Display in X, Y and Z subplot
for i in range(3):
dims = numpy.delete(numpy.arange(3), slice_config[i][3])
# Plot brain
coords_mask, coords_idx = get_slice_coords(brain_mask, slice_config[i])
coords_brain_x = coords_mask[dims[0]][coords_idx]
coords_brain_y = coords_mask[dims[1]][coords_idx]
axes[i].scatter(coords_brain_x, coords_brain_y,
c=[0.5, 0.5, 0.5], s=15, edgecolors='face')
axes[i].hold(True)
# Plot ROI
coords_mask, coords_idx = get_slice_coords(roi_mask, slice_config[i])
coords_roi_x = coords_mask[dims[0]][coords_idx]
coords_roi_y = coords_mask[dims[1]][coords_idx]
axes[i].scatter(coords_roi_x, coords_roi_y,
c=clr_rgb[coords_idx, :], s=15, edgecolors='face')
axes[i].set_title("{0} at coord {2}={3}".format(title, *slice_config[i]))
if legend_location is not None:
axes[i].legend(("Cortex", "ROI"), loc=legend_location)
axes[i].grid(True)
#
# Apply stylistic adjustments
#
# Set axis limits
coords_3d = numpy.where(roi_mask)
coords_x = numpy.sort(coords_3d[0])
coords_y = numpy.sort(coords_3d[1])
coords_z = numpy.sort(coords_3d[2])
axes[0].set_xlim([coords_x[0] - margin, coords_x[-1] + margin])
axes[0].set_ylim([coords_z[0] - margin, coords_z[-1] + margin])
axes[1].set_xlim([coords_y[0] - margin, coords_y[-1] + margin])
axes[2].set_ylim([coords_y[0] - margin, coords_y[-1] + margin])
# Remove shared borders
axes[0].spines['bottom'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[1].spines['left'].set_visible(False)
axes[1].spines['bottom'].set_visible(False)
axes[2].spines['top'].set_visible(False)
axes[2].spines['right'].set_visible(False)
axes[3].spines['top'].set_visible(False)
axes[3].spines['left'].set_visible(False)
# Name axes
axes[0].set_ylabel("Z axis")
axes[2].set_xlabel("X axis")
axes[2].set_ylabel("Y axis")
axes[3].set_xlabel("Y axis")
# Backgroud color
bg_color = [0.9, 0.9, 0.9]
for i in range(4):
axes[i].patch.set_facecolor(bg_color)
# Add colorbar
sm = pyplot.cm.ScalarMappable(cmap=cmap, norm=pyplot.Normalize(vmin=0, vmax=1))
sm._A = []
cbaxes = num_fig.add_axes([0.95, 0.1, 0.01, 0.4])
cbar = pyplot.colorbar(sm, cax=cbaxes)
cbar.set_ticklabels([])
def normalize_nifti_image(image_path, fwhm=6):
"""Normalize a 4D Nifti image.
Apply a spatial smooth of dimension `fwhm` on a 4D nifti image
located at `image_path` and normalize it along the temporal axis.
The normalization step subtracts and divides the voxels' values
by their means. The method returns just the active voxels and
their XYZ-coordinates to reduce memory usage.
Parameters
----------
image_path : string
Path of the 4D Nifti image.
fwhm : float, (default: 6)
Full width half maximum window dimension used to smooth the
image. The same value will be used for all 3 spatial dimensions.
Returns
-------
nifti_data : numpy.ndarray, shape (num_timepoints, num_voxels)
Voxels' values normalized that were active at least at one
timepoint during the scan.
idx_active : tuple, shape (3)
X,Y,Z-coordinates of the 3D brain where the active voxels
are located.
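    Examples
    --------
    A minimal sketch of the intended usage; the file name below is hypothetical:
    >>> nifti_data, idx_active = utils.normalize_nifti_image('scan_rest.nii.gz')
    >>> nifti_data.shape[1] == len(idx_active[0])  # one column per active voxel
    True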
"""
# Smooth
nifti_image = image.smooth_img(image_path, fwhm)
nifti_data = nifti_image.get_data()
# Keep just non-zero voxels
is_zero = numpy.abs(nifti_data) < numpy.finfo(nifti_data.dtype).eps
idx_active = numpy.where(~numpy.all(is_zero, axis=-1))
# Calc mean
nifti_data = nifti_data[idx_active]
nifti_data_mean = numpy.mean(nifti_data, axis=-1)[..., numpy.newaxis]
# Demean and normalize
nifti_data = nifti_data - nifti_data_mean
nifti_data = nifti_data / nifti_data_mean
return nifti_data.T, idx_active
def concatenate_data(brain_mask, roi_mask, data_0, data_xyz_0, data_1, data_xyz_1):
"""Concatenate data.
Concatenate `data_0` and `data_1` along their first dimension such that
the first part of the result represents ROI voxels and the second part
represents outside of ROI voxels, as defined by `brain_mask` and `roi_mask`.
Parameters
----------
brain_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
A 3-dimensional boolean matrix where each cell represent a
voxel and is True if it belongs to the brain, False otherwise.
roi_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
A 3-dimensional boolean matrix of the same size of `brain_mask`
where each cell is True if the voxel belongs to the region of
interest, False otherwise.
data_0, data_1 : numpy.ndarray, shape (num_timepoints, num_voxels)
Voxels' time-series inside the ROI.
data_xyz_0, data_xyz_1 : tuple, shape (3, num_voxels)
X,Y,Z-coordinates of `data_0`/`data_1`. Each element of the triplet
is an array-like list of voxels' coordinates along an axis, stored
in the same order of the voxels' values in `data_0`/`data_1`.
Returns
-------
brain_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
As the homonymous input parameter, but updated such that just the
voxels present in `data_0` and `data_1` are set to True.
roi_mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
As the homonymous input parameter, but updated such that just the
voxels present in `data_0` and `data_1` are set to True.
data : numpy.ndarray, shape (num_timepoints, num_voxels)
        Input data concatenated along the num_timepoints axis such that
the first ``numpy.sum(roi_mask)`` of `num_voxels` are voxels inside
the region of interest, while the other voxels are outside.
Examples
--------
Concatenate two scans:
>>> brain_mask, roi_mask = utils.load_masks('cort-maxprob-thr25-2mm', 'Precentral Gyrus', 'both') # Load masks
>>> data_info_0 = utils.normalize_nifti_image(image_path_0) # Load scan 0
>>> data_info_1 = utils.normalize_nifti_image(image_path_1) # Load scan 1
>>> brain_mask, roi_mask, data = utils.concatenate_data(brain_mask, roi_mask, *data_info_0, *data_info_1) # Concatenate scans
"""
def get_idx(xyz, dim):
""" Transforms X,Y,Z coords to a unique index.
"""
return xyz[0]*dim[1]*dim[2] + xyz[1]*dim[2] + xyz[2]
# Keep just voxels that are non-zero in both scans and inside
# brain mask
brain_data_0 = numpy.zeros(brain_mask.shape, dtype=bool)
brain_data_1 = numpy.zeros(brain_mask.shape, dtype=bool)
brain_data_0[data_xyz_0] = True
brain_data_1[data_xyz_1] = True
# Remove non active voxels from masks (i.e., voxels not present
# in the data)
brain_mask = brain_mask * brain_data_0 * brain_data_1
roi_mask = roi_mask * brain_mask
nonroi_mask = ~roi_mask * brain_mask
# Generate brain indexes
data_idx_0 = get_idx(data_xyz_0, brain_mask.shape)
data_idx_1 = get_idx(data_xyz_1, brain_mask.shape)
# Generate inside ROI indexes
roi_mask_xyz = numpy.nonzero(roi_mask)
roi_mask_idx = get_idx(roi_mask_xyz, brain_mask.shape)
# Generate outside ROI indexes
nonroi_mask_xyz = numpy.nonzero(nonroi_mask)
nonroi_mask_idx = get_idx(nonroi_mask_xyz, brain_mask.shape)
# Test if index is inside ROI
is_roi_0 = numpy.in1d(data_idx_0, roi_mask_idx)
is_roi_1 = numpy.in1d(data_idx_1, roi_mask_idx)
# Test if index is outside ROI
is_nonroi_0 = numpy.in1d(data_idx_0, nonroi_mask_idx)
is_nonroi_1 = numpy.in1d(data_idx_1, nonroi_mask_idx)
# Merge data
data = numpy.concatenate((
numpy.concatenate((data_0[:, is_roi_0], data_1[:, is_roi_1]), axis=0),
numpy.concatenate((data_0[:, is_nonroi_0], data_1[:, is_nonroi_1]), axis=0)),
axis=1)
assert(numpy.sum(is_roi_0) + numpy.sum(is_nonroi_0) == numpy.sum(brain_mask, axis=None))
assert(numpy.sum(is_roi_1) + numpy.sum(is_nonroi_1) == numpy.sum(brain_mask, axis=None))
return brain_mask, roi_mask, data
def save_nifti(file_path, data, mask, affine=None, zooms=[2,2,2]):
"""Save a nifti image.
Save the voxels that are set True in `mask` and which values are contained
in `data` in a 3D nifti image.
Parameters
----------
file_path : str
File path of the nifti image.
data : numpy.ndarray, shape (num_voxels, )
Values of the voxels. The order of the voxels must be the same as
their coordinates in `numpy.where(mask)`.
mask : numpy.ndarray, shape (x_dim, y_dim, z_dim)
3-dimensional boolean numpy array representing brain voxels, where
an element is set to True if and only if it is present in data.
affine : numpy.ndarray, shape (4, 4), (default=None)
Nifti affine matrix.
zooms : array_like, shape(3, ), (default: [2,2,2])
Nifti zooms in millimeters.
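    Examples
    --------
    A minimal sketch; the output file name is hypothetical and `data` and
    `roi_mask` are assumed to come from a previous connectopic mapping step:
    >>> utils.save_nifti('connectopic_map.nii.gz', data, roi_mask)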
"""
if affine is None:
affine = [[-2, 0, 0, 90],
[0, 2, 0, -126],
[0, 0, 2, -72],
[0, 0, 0, 1]]
# Set voxels values
coords = numpy.where(mask)
out = numpy.zeros(mask.shape)
out[coords] = numpy.asarray(data, dtype=numpy.float32)
out = numpy.flip(out, axis=0)
# Transform connectopic map into Nifti image
nifti = nibabel.Nifti1Image(out, affine)
nifti.header.set_data_dtype(numpy.float32)
nifti.header.set_zooms(zooms)
nifti.header['qform_code'] = 4
nifti.header['sform_code'] = 4
nifti.header['xyzt_units'] = 10
nifti.to_filename(file_path)
| apache-2.0 |
manpen/hypergen | libs/NetworKit/scripts/DynamicBetweennessExperiments.py | 3 | 4139 | from networkit import *
from networkit.dynamic import *
from networkit.centrality import *
import pandas as pd
import random
def removeAndAddEdges(G, nEdges, tabu=None):
if nEdges > G.numberOfEdges() - tabu.numberOfEdges():
        raise ValueError("G does not have enough edges")
# select random edges for removal
removed = set()
while len(removed) < nEdges:
(u, v) = G.randomEdge()
if not tabu.hasEdge(u, v) and not ((u,v) in removed or (v,u) in removed): # exclude all edges in the tabu graph
removed.add((u, v))
print (removed)
# build event streams
removeStream = []
for (u, v) in removed:
removeStream.append(GraphEvent(GraphEvent.EDGE_REMOVAL, u, v, 0))
addStream = []
for (u, v) in removed:
addStream.append(GraphEvent(GraphEvent.EDGE_ADDITION, u, v, 1.0))
return (removeStream, addStream)
def setRandomWeights(G, mu, sigma):
"""
Add random weights, normal distribution with mean mu and standard deviation sigma
"""
for (u, v) in G.edges():
w = random.normalvariate(mu, sigma)
G.setWeight(u, v, w)
return G
def test(G, nEdges, batchSize, epsilon, delta, size):
# find a set of nEdges to remove from G
T = graph.SpanningForest(G).generate()
(removeStream, addStream) = removeAndAddEdges(G, nEdges, tabu=T)
# remove the edges from G
updater = dynamic.GraphUpdater(G)
updater.update(removeStream)
# run the algorithms on the inital graph
bc = Betweenness(G)
print("Running bc")
bc.run()
dynBc = DynBetweenness(G, True)
print("Running dyn bc with predecessors")
dynBc.run()
apprBc = ApproxBetweenness(G, epsilon, delta)
print("Running approx bc")
apprBc.run()
dynApprBc = DynApproxBetweenness(G, epsilon, delta, True)
print("Running dyn approx bc with predecessors")
dynApprBc.run()
# apply the batches
nExperiments = nEdges // batchSize
timesBc = []
timesDynBc = []
timesApprBc = []
timesDynApprBc = []
scoresBc = []
scoresApprBc = []
for i in range(nExperiments):
batch = addStream[i*batchSize : (i+1)*batchSize]
# add the edges of batch to the graph
totalTime = 0.0
for j in range(0, batchSize):
updater.update([batch[j]])
# update the betweenness with the dynamic exact algorithm
t = stopwatch.Timer()
dynBc.update(batch[j])
totalTime += t.stop()
timesDynBc.append(totalTime)
# update the betweenness with the static exact algorithm
t = stopwatch.Timer()
bc.run()
x = t.stop()
timesBc.append(x)
print("Exact BC")
print(x)
print("Speedup Dyn BC (with preds)")
print(x/totalTime)
# update the betweenness with the static approximated algorithm
t = stopwatch.Timer()
apprBc.run()
x = t.stop()
timesApprBc.append(x)
print("ApprBC")
print(x)
# update the betweenness with the dynamic approximated algorithm
t = stopwatch.Timer()
dynApprBc.update(batch)
y = t.stop()
timesDynApprBc.append(y)
print("Speedup DynApprBC (with preds)")
print(x/y)
bcNormalized = [ k/(size*(size-1)) for k in bc.scores()]
scoresBc.append(bcNormalized)
scoresApprBc.append(dynApprBc.scores())
a = pd.Series(timesBc)
b = pd.Series(timesDynBc)
c = pd.Series(timesApprBc)
d = pd.Series(timesDynApprBc)
df1 = pd.DataFrame({"Static exact bc": a, "Dynamic exact bc" : b, "Static approx bc" : c, "Dynamic approx bc" : d})
dic2 = {}
for experiment in range(nExperiments):
a = pd.Series(scoresBc[experiment])
b = pd.Series(scoresApprBc[experiment])
dic2["Exact scores (exp. "+str(experiment)+")"] = a
dic2["Approx scores (exp. "+str(experiment)+")"] = b
df2 = pd.DataFrame(dic2)
return df1, df2
if __name__ == "__main__":
setNumberOfThreads(1)
size = 20000
for i in range(11):
batchSize = 2**i
G = generators.DorogovtsevMendesGenerator(size).generate()
cc = properties.ConnectedComponents(G)
cc.run()
if (cc.numberOfComponents() == 1) :
nEdges = batchSize * 10
epsilon = 0.05
delta = 0.1
(df1, df2) = test(G, nEdges, batchSize, epsilon, delta, size)
df1.to_csv("results/times_unweighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
df2.to_csv("results/scores_unweighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
else:
print("The generated graph is not connected.")
| gpl-3.0 |
bigdataelephants/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
maciejkula/spotlight | examples/bloom_embeddings/plot.py | 1 | 2760 | import argparse
import pandas as pd
import matplotlib
matplotlib.use('Agg') # NOQA
import matplotlib.pyplot as plt
import seaborn as sns
from example import Results
def process_results(results, verbose=False):
baseline = results.best_baseline()
def like_baseline(x):
for key in ('n_iter',
'batch_size',
'l2',
'learning_rate',
'loss',
'embedding_dim'):
if x[key] != baseline[key]:
return False
return True
data = pd.DataFrame([x for x in results
if like_baseline(x)])
best = (data.sort_values('test_mrr', ascending=False)
.groupby('compression_ratio', as_index=False).first())
# Normalize per iteration
best['elapsed'] = best['elapsed'] / best['n_iter']
if verbose:
print(best)
baseline_mrr = (best[best['compression_ratio'] == 1.0]
['validation_mrr'].values[0])
baseline_time = (best[best['compression_ratio'] == 1.0]
['elapsed'].values[0])
compression_ratio = best['compression_ratio'].values
mrr = best['validation_mrr'].values / baseline_mrr
elapsed = best['elapsed'].values / baseline_time
return compression_ratio[:-1], mrr[:-1], elapsed[:-1]
def plot_results(model, movielens, amazon):
sns.set_style("darkgrid")
for name, result in (('Movielens',
movielens), ('Amazon', amazon)):
print('Dataset: {}'.format(name))
(compression_ratio,
mrr,
elapsed) = process_results(result, verbose=True)
plt.plot(compression_ratio, mrr,
label=name)
plt.ylabel("MRR ratio to baseline")
plt.xlabel("Compression ratio")
plt.title("Compression ratio vs MRR ratio")
plt.legend(loc='lower right')
plt.savefig('{}_plot.png'.format(model))
plt.close()
for name, result in (('Movielens',
movielens), ('Amazon', amazon)):
(compression_ratio,
mrr,
elapsed) = process_results(result)
plt.plot(compression_ratio, elapsed,
label=name)
plt.ylabel("Time ratio to baseline")
plt.xlabel("Compression ratio")
plt.title("Compression ratio vs time ratio")
plt.legend(loc='lower right')
plt.savefig('{}_time.png'.format(model))
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
args = parser.parse_args()
plot_results(args.model,
Results('movielens_{}_results.txt'.format(args.model)),
Results('amazon_{}_results.txt'.format(args.model)))
| mit |
WarrenWeckesser/numpy | numpy/doc/creation.py | 6 | 5431 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
 >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)])  # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
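For example: ::
 >>> np.ones((2, 3))
 array([[ 1., 1., 1.], [ 1., 1., 1.]])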
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: h5py
FITS: Astropy
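As a minimal sketch (the file name 'data.h5' and the dataset name 'mydata' are
hypothetical), h5py returns numpy arrays directly when a dataset is sliced: ::
 >>> import h5py
 >>> with h5py.File('data.h5', 'r') as f:
 ...     arr = f['mydata'][...] # read the whole dataset into a numpy array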
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
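For instance, a comma-separated file (here a hypothetical 'data.csv' with one
header row) can be loaded straight into an array: ::
 >>> arr = np.genfromtxt('data.csv', delimiter=',', skip_header=1)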
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
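A minimal round trip with a hypothetical file 'data.bin'; note that the dtype
and byte order are not stored in the file and must be supplied again on read: ::
 >>> a = np.arange(10, dtype=np.float64)
 >>> a.tofile('data.bin')
 >>> b = np.fromfile('data.bin', dtype=np.float64)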
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are the
many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
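For example: ::
 >>> np.random.random((2, 2)) # uniform values in [0, 1); different on every call
 >>> np.diag([1, 2, 3])
 array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])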
"""
| bsd-3-clause |
dgasmith/EEX_scratch | tests/test_gromacs.py | 1 | 1044 | """
Tests for GROMACS IO
"""
import eex
import numpy as np
import os
import pytest
import pandas as pd
import eex_find_files
# Not working on GROMACS for now -
"""
@pytest.fixture(scope="module")
def nbutane_dl():
dl = eex.datalayer.DataLayer("test_gromacs_read")
gro_folder = eex_find_files.get_example_filename("gromacs", "alkanes", "nbutane")
ffdir = os.path.join(gro_folder, "..", "trappe.ff")
eex.translators.gromacs.read_gromacs_gro_file(dl, gro_folder, ffdir=ffdir)
return dl
def test_gromacs_read_conf(nbutane_dl):
dl = nbutane_dl
box_size = dl.get_box_size()
assert box_size["x"][0] == pytest.approx(-2.5737, 1.e-6)
assert box_size["x"][1] == pytest.approx(2.5737, 1.e-6)
data = dl.get_atoms(None)
assert data.shape[0] == 4
assert dl.get_atom_count() == 4
assert np.allclose(data["atomic_number"], [6, 6, 6, 6])
assert np.allclose(data[["X", "Y", "Z"]].min(axis=0), [-0.147, -0.046, -0.153])
assert np.allclose(data[["X", "Y", "Z"]].max(axis=0), [0.0, 0.16, 0.0])
""" | bsd-3-clause |
linebp/pandas | asv_bench/benchmarks/timeseries.py | 1 | 15456 | try:
from pandas.plotting._converter import DatetimeConverter
except ImportError:
from pandas.tseries.converter import DatetimeConverter
from .pandas_vb_common import *
import pandas as pd
import datetime as dt
try:
import pandas.tseries.holiday
except ImportError:
pass
from pandas.tseries.frequencies import infer_freq
import numpy as np
if hasattr(Series, 'convert'):
Series.resample = Series.convert
class DatetimeIndex(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
self.delta_offset = pd.offsets.Day()
self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
self.slow_offset = pd.offsets.BusinessDay()
self.rng2 = date_range(start='1/1/2000 9:30', periods=10000, freq='S', tz='US/Eastern')
self.index_repeated = date_range(start='1/1/2000', periods=1000, freq='T').repeat(10)
self.rng3 = date_range(start='1/1/2000', periods=1000, freq='H')
self.df = DataFrame(np.random.randn(len(self.rng3), 2), self.rng3)
self.rng4 = date_range(start='1/1/2000', periods=1000, freq='H', tz='US/Eastern')
self.df2 = DataFrame(np.random.randn(len(self.rng4), 2), index=self.rng4)
N = 100000
self.dti = pd.date_range('2011-01-01', freq='H', periods=N).repeat(5)
self.dti_tz = pd.date_range('2011-01-01', freq='H', periods=N,
tz='Asia/Tokyo').repeat(5)
self.rng5 = date_range(start='1/1/2000', end='3/1/2000', tz='US/Eastern')
self.dst_rng = date_range(start='10/29/2000 1:00:00', end='10/29/2000 1:59:59', freq='S')
self.index = date_range(start='10/29/2000', end='10/29/2000 00:59:59', freq='S')
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(date_range(start='10/29/2000 2:00:00', end='10/29/2000 3:00:00', freq='S'))
self.N = 10000
self.rng6 = date_range(start='1/1/1', periods=self.N, freq='B')
self.rng7 = date_range(start='1/1/1700', freq='D', periods=100000)
self.a = self.rng7[:50000].append(self.rng7[50002:])
def time_add_timedelta(self):
(self.rng + dt.timedelta(minutes=2))
def time_add_offset_delta(self):
(self.rng + self.delta_offset)
def time_add_offset_fast(self):
(self.rng + self.fast_offset)
def time_add_offset_slow(self):
(self.rng + self.slow_offset)
def time_normalize(self):
self.rng2.normalize()
def time_unique(self):
self.index_repeated.unique()
def time_reset_index(self):
self.df.reset_index()
def time_reset_index_tz(self):
self.df2.reset_index()
def time_dti_factorize(self):
self.dti.factorize()
def time_dti_tz_factorize(self):
self.dti_tz.factorize()
def time_timestamp_tzinfo_cons(self):
self.rng5[0]
def time_infer_dst(self):
self.index.tz_localize('US/Eastern', infer_dst=True)
def time_timeseries_is_month_start(self):
self.rng6.is_month_start
def time_infer_freq(self):
infer_freq(self.a)
class TimeDatetimeConverter(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
class Iteration(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.M = 10000
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
def iter_n(self, iterable, n=None):
self.i = 0
for _ in iterable:
self.i += 1
if ((n is not None) and (self.i > n)):
break
def time_iter_datetimeindex(self):
self.iter_n(self.idx1)
def time_iter_datetimeindex_preexit(self):
self.iter_n(self.idx1, self.M)
def time_iter_periodindex(self):
self.iter_n(self.idx2)
def time_iter_periodindex_preexit(self):
self.iter_n(self.idx2, self.M)
#----------------------------------------------------------------------
# Resampling
class ResampleDataFrame(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='20130101', periods=100000, freq='50L')
self.df = DataFrame(np.random.randn(100000, 2), index=self.rng)
def time_max_numpy(self):
self.df.resample('1s', how=np.max)
def time_max_string(self):
self.df.resample('1s', how='max')
def time_mean_numpy(self):
self.df.resample('1s', how=np.mean)
def time_mean_string(self):
self.df.resample('1s', how='mean')
def time_min_numpy(self):
self.df.resample('1s', how=np.min)
def time_min_string(self):
self.df.resample('1s', how='min')
class ResampleSeries(object):
goal_time = 0.2
def setup(self):
self.rng1 = period_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts1 = Series(np.random.randn(len(self.rng1)), index=self.rng1)
self.rng2 = date_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts2 = Series(np.random.randn(len(self.rng2)), index=self.rng2)
self.rng3 = date_range(start='2000-01-01 00:00:00', end='2000-01-01 10:00:00', freq='555000U')
self.int_ts = Series(5, self.rng3, dtype='int64')
self.dt_ts = self.int_ts.astype('datetime64[ns]')
def time_period_downsample_mean(self):
self.ts1.resample('D', how='mean')
def time_timestamp_downsample_mean(self):
self.ts2.resample('D', how='mean')
def time_resample_datetime64(self):
# GH 7754
self.dt_ts.resample('1S', how='last')
def time_1min_5min_mean(self):
self.ts2[:10000].resample('5min', how='mean')
def time_1min_5min_ohlc(self):
self.ts2[:10000].resample('5min', how='ohlc')
class AsOf(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
self.ts2 = self.ts.copy()
self.ts2[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# without *while* loop
def time_asof_single(self):
self.ts.asof(self.dates[0])
# test speed of the code path for a scalar index
# before the start. should be the same as above.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
# test the speed of the code path for a scalar index
# with a long *while* loop. should still be much
# faster than pre-computing all the NAs.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
class AsOfDataFrame(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.M = 100
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
self.ts = DataFrame(np.random.randn(self.N, self.M), index=self.rng)
self.ts2 = self.ts.copy()
self.ts2.iloc[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3.iloc[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# with pre-computing all NAs.
def time_asof_single(self):
self.ts.asof(self.dates[0])
# should be roughly the same as above.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
# test speed of the code path for a scalar index
# before the start. should be without the cost of
# pre-computing all the NAs.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
class TimeSeries(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='s')
self.rng = self.rng.take(np.random.permutation(self.N))
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.rng2 = date_range(start='1/1/2000', periods=self.N, freq='T')
self.ts2 = Series(np.random.randn(self.N), index=self.rng2)
self.lindex = np.random.permutation(self.N)[:(self.N // 2)]
self.rindex = np.random.permutation(self.N)[:(self.N // 2)]
self.left = Series(self.ts2.values.take(self.lindex), index=self.ts2.index.take(self.lindex))
self.right = Series(self.ts2.values.take(self.rindex), index=self.ts2.index.take(self.rindex))
self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S')
self.ts3 = Series(1, index=self.rng3)
def time_sort_index_monotonic(self):
self.ts2.sort_index()
def time_sort_index_non_monotonic(self):
self.ts.sort_index()
def time_timeseries_slice_minutely(self):
self.ts2[:10000]
def time_add_irregular(self):
(self.left + self.right)
def time_large_lookup_value(self):
self.ts3[self.ts3.index[(len(self.ts3) // 2)]]
self.ts3.index._cleanup()
class SeriesArithmetic(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.s = Series(date_range(start='20140101', freq='T', periods=self.N))
self.delta_offset = pd.offsets.Day()
self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
self.slow_offset = pd.offsets.BusinessDay()
def time_add_offset_delta(self):
(self.s + self.delta_offset)
def time_add_offset_fast(self):
(self.s + self.fast_offset)
def time_add_offset_slow(self):
(self.s + self.slow_offset)
class ToDatetime(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='1/1/2000', periods=10000, freq='D')
self.stringsD = Series((((self.rng.year * 10000) + (self.rng.month * 100)) + self.rng.day), dtype=np.int64).apply(str)
self.rng = date_range(start='1/1/2000', periods=20000, freq='H')
self.strings = [x.strftime('%Y-%m-%d %H:%M:%S') for x in self.rng]
self.strings_nosep = [x.strftime('%Y%m%d %H:%M:%S') for x in self.rng]
self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800'
for x in self.rng]
self.s = Series((['19MAY11', '19MAY11:00:00:00'] * 100000))
self.s2 = self.s.str.replace(':\\S+$', '')
def time_format_YYYYMMDD(self):
to_datetime(self.stringsD, format='%Y%m%d')
def time_iso8601(self):
to_datetime(self.strings)
def time_iso8601_nosep(self):
to_datetime(self.strings_nosep)
def time_iso8601_format(self):
to_datetime(self.strings, format='%Y-%m-%d %H:%M:%S')
def time_iso8601_format_no_sep(self):
to_datetime(self.strings_nosep, format='%Y%m%d %H:%M:%S')
def time_iso8601_tz_spaceformat(self):
to_datetime(self.strings_tz_space)
def time_format_exact(self):
to_datetime(self.s2, format='%d%b%y')
def time_format_no_exact(self):
to_datetime(self.s, format='%d%b%y', exact=False)
class Offsets(object):
goal_time = 0.2
def setup(self):
self.date = dt.datetime(2011, 1, 1)
self.dt64 = np.datetime64('2011-01-01 09:00Z')
self.hcal = pd.tseries.holiday.USFederalHolidayCalendar()
self.day = pd.offsets.Day()
self.year = pd.offsets.YearBegin()
self.cday = pd.offsets.CustomBusinessDay()
self.cmb = pd.offsets.CustomBusinessMonthBegin(calendar=self.hcal)
self.cme = pd.offsets.CustomBusinessMonthEnd(calendar=self.hcal)
self.cdayh = pd.offsets.CustomBusinessDay(calendar=self.hcal)
def time_timeseries_day_apply(self):
self.day.apply(self.date)
def time_timeseries_day_incr(self):
(self.date + self.day)
def time_timeseries_year_apply(self):
self.year.apply(self.date)
def time_timeseries_year_incr(self):
(self.date + self.year)
# custom business offsets
def time_custom_bday_decr(self):
(self.date - self.cday)
def time_custom_bday_incr(self):
(self.date + self.cday)
def time_custom_bday_apply(self):
self.cday.apply(self.date)
def time_custom_bday_apply_dt64(self):
self.cday.apply(self.dt64)
def time_custom_bday_cal_incr(self):
self.date + 1 * self.cdayh
def time_custom_bday_cal_decr(self):
self.date - 1 * self.cdayh
def time_custom_bday_cal_incr_n(self):
self.date + 10 * self.cdayh
def time_custom_bday_cal_incr_neg_n(self):
self.date - 10 * self.cdayh
# Increment custom business month
def time_custom_bmonthend_incr(self):
(self.date + self.cme)
def time_custom_bmonthend_incr_n(self):
(self.date + (10 * self.cme))
def time_custom_bmonthend_decr_n(self):
(self.date - (10 * self.cme))
def time_custom_bmonthbegin_decr_n(self):
(self.date - (10 * self.cmb))
def time_custom_bmonthbegin_incr_n(self):
(self.date + (10 * self.cmb))
class SemiMonthOffset(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
# date is not on an offset which will be slowest case
self.date = dt.datetime(2011, 1, 2)
self.semi_month_end = pd.offsets.SemiMonthEnd()
self.semi_month_begin = pd.offsets.SemiMonthBegin()
def time_end_apply(self):
self.semi_month_end.apply(self.date)
def time_end_incr(self):
self.date + self.semi_month_end
def time_end_incr_n(self):
self.date + 10 * self.semi_month_end
def time_end_decr(self):
self.date - self.semi_month_end
def time_end_decr_n(self):
self.date - 10 * self.semi_month_end
def time_end_apply_index(self):
self.semi_month_end.apply_index(self.rng)
def time_end_incr_rng(self):
self.rng + self.semi_month_end
def time_end_decr_rng(self):
self.rng - self.semi_month_end
def time_begin_apply(self):
self.semi_month_begin.apply(self.date)
def time_begin_incr(self):
self.date + self.semi_month_begin
def time_begin_incr_n(self):
self.date + 10 * self.semi_month_begin
def time_begin_decr(self):
self.date - self.semi_month_begin
def time_begin_decr_n(self):
self.date - 10 * self.semi_month_begin
def time_begin_apply_index(self):
self.semi_month_begin.apply_index(self.rng)
def time_begin_incr_rng(self):
self.rng + self.semi_month_begin
def time_begin_decr_rng(self):
self.rng - self.semi_month_begin
| bsd-3-clause |
jseabold/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 168 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
MTK6580/walkie-talkie | ALPS.L1.MP6.V2_HEXING6580_WE_L/alps/cts/apps/CameraITS/tests/scene1/test_black_white.py | 3 | 3146 | # Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.caps
import its.device
import its.objects
import pylab
import os.path
import matplotlib
import matplotlib.pyplot
def main():
"""Test that the device will produce full black+white images.
"""
NAME = os.path.basename(__file__).split(".")[0]
r_means = []
g_means = []
b_means = []
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.manual_sensor(props) and
its.caps.per_frame_control(props))
expt_range = props['android.sensor.info.exposureTimeRange']
sens_range = props['android.sensor.info.sensitivityRange']
# Take a shot with very low ISO and exposure time. Expect it to
# be black.
print "Black shot: sens = %d, exp time = %.4fms" % (
sens_range[0], expt_range[0]/1000000.0)
req = its.objects.manual_capture_request(sens_range[0], expt_range[0])
cap = cam.do_capture(req)
img = its.image.convert_capture_to_rgb_image(cap)
its.image.write_image(img, "%s_black.jpg" % (NAME))
tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
black_means = its.image.compute_image_means(tile)
r_means.append(black_means[0])
g_means.append(black_means[1])
b_means.append(black_means[2])
print "Dark pixel means:", black_means
# Take a shot with very high ISO and exposure time. Expect it to
# be white.
print "White shot: sens = %d, exp time = %.2fms" % (
sens_range[1], expt_range[1]/1000000.0)
req = its.objects.manual_capture_request(sens_range[1], expt_range[1])
cap = cam.do_capture(req)
img = its.image.convert_capture_to_rgb_image(cap)
its.image.write_image(img, "%s_white.jpg" % (NAME))
tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
white_means = its.image.compute_image_means(tile)
r_means.append(white_means[0])
g_means.append(white_means[1])
b_means.append(white_means[2])
print "Bright pixel means:", white_means
# Draw a plot.
pylab.plot([0,1], r_means, 'r')
pylab.plot([0,1], g_means, 'g')
pylab.plot([0,1], b_means, 'b')
pylab.ylim([0,1])
matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
for val in black_means:
assert(val < 0.025)
for val in white_means:
assert(val > 0.975)
if __name__ == '__main__':
main()
| gpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/units/basic_units.py | 3 | 10916 | import math
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
from matplotlib.axes import Axes
from matplotlib.cbook import iterable
class ProxyDelegate(object):
def __init__(self, fn_name, proxy_type):
self.proxy_type = proxy_type
self.fn_name = fn_name
def __get__(self, obj, objtype=None):
return self.proxy_type(self.fn_name, obj)
class TaggedValueMeta (type):
def __init__(cls, name, bases, dict):
for fn_name in cls._proxies.keys():
try:
dummy = getattr(cls, fn_name)
except AttributeError:
setattr(cls, fn_name, ProxyDelegate(fn_name, cls._proxies[fn_name]))
class PassThroughProxy(object):
def __init__(self, fn_name, obj):
self.fn_name = fn_name
self.target = obj.proxy_target
def __call__(self, *args):
#print 'passthrough', self.target, self.fn_name
fn = getattr(self.target, self.fn_name)
ret = fn(*args)
return ret
class ConvertArgsProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
for a in args:
try:
converted_args.append(a.convert_to(self.unit))
except AttributeError:
converted_args.append(TaggedValue(a, self.unit))
converted_args = tuple([c.get_value() for c in converted_args])
return PassThroughProxy.__call__(self, *converted_args)
class ConvertReturnProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
ret = PassThroughProxy.__call__(self, *args)
if (type(ret) == type(NotImplemented)):
return NotImplemented
return TaggedValue(ret, self.unit)
class ConvertAllProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
arg_units = [self.unit]
for a in args:
if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'):
# if this arg has a unit type but no conversion ability,
# this operation is prohibited
return NotImplemented
if hasattr(a, 'convert_to'):
try:
a = a.convert_to(self.unit)
except:
pass
arg_units.append(a.get_unit())
converted_args.append(a.get_value())
else:
converted_args.append(a)
if hasattr(a, 'get_unit'):
arg_units.append(a.get_unit())
else:
arg_units.append(None)
converted_args = tuple(converted_args)
ret = PassThroughProxy.__call__(self, *converted_args)
if (type(ret) == type(NotImplemented)):
return NotImplemented
ret_unit = unit_resolver(self.fn_name, arg_units)
if (ret_unit == NotImplemented):
return NotImplemented
return TaggedValue(ret, ret_unit)
class TaggedValue (object):
__metaclass__ = TaggedValueMeta
_proxies = {'__add__':ConvertAllProxy,
'__sub__':ConvertAllProxy,
'__mul__':ConvertAllProxy,
'__rmul__':ConvertAllProxy,
'__cmp__':ConvertAllProxy,
'__lt__':ConvertAllProxy,
'__gt__':ConvertAllProxy,
'__len__':PassThroughProxy}
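    # (Descriptive note, hedged) The TaggedValueMeta metaclass wires each of
    # the operator names above to its proxy class, so arithmetic on tagged
    # values converts operands to a common unit before delegating to the
    # wrapped value.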
def __new__(cls, value, unit):
# generate a new subclass for value
value_class = type(value)
try:
subcls = type('TaggedValue_of_%s' % (value_class.__name__),
tuple([cls, value_class]),
{})
if subcls not in units.registry:
units.registry[subcls] = basicConverter
return object.__new__(subcls, value, unit)
except TypeError:
if cls not in units.registry:
units.registry[cls] = basicConverter
return object.__new__(cls, value, unit)
def __init__(self, value, unit):
self.value = value
self.unit = unit
self.proxy_target = self.value
def get_compressed_copy(self, mask):
compressed_value = np.ma.masked_array(self.value, mask=mask).compressed()
return TaggedValue(compressed_value, self.unit)
def __getattribute__(self, name):
if (name.startswith('__')):
return object.__getattribute__(self, name)
variable = object.__getattribute__(self, 'value')
if (hasattr(variable, name) and name not in self.__class__.__dict__):
return getattr(variable, name)
return object.__getattribute__(self, name)
def __array__(self, t = None, context = None):
if t is not None:
return np.asarray(self.value).astype(t)
else:
return np.asarray(self.value, 'O')
def __array_wrap__(self, array, context):
return TaggedValue(array, self.unit)
def __repr__(self):
return 'TaggedValue(' + repr(self.value) + ', ' + repr(self.unit) + ')'
def __str__(self):
return str(self.value) + ' in ' + str(self.unit)
def __iter__(self):
class IteratorProxy(object):
def __init__(self, iter, unit):
self.iter = iter
self.unit = unit
def next(self):
value = self.iter.next()
return TaggedValue(value, self.unit)
return IteratorProxy(iter(self.value), self.unit)
def get_compressed_copy(self, mask):
new_value = np.ma.masked_array(self.value, mask=mask).compressed()
return TaggedValue(new_value, self.unit)
def convert_to(self, unit):
#print 'convert to', unit, self.unit
if (unit == self.unit or not unit):
return self
new_value = self.unit.convert_value_to(self.value, unit)
return TaggedValue(new_value, unit)
def get_value(self):
return self.value
def get_unit(self):
return self.unit
class BasicUnit(object):
def __init__(self, name, fullname=None):
self.name = name
if fullname is None: fullname = name
self.fullname = fullname
self.conversions = dict()
def __repr__(self):
return 'BasicUnit(%s)'%self.name
def __str__(self):
return self.fullname
def __call__(self, value):
return TaggedValue(value, self)
def __mul__(self, rhs):
value = rhs
unit = self
if hasattr(rhs, 'get_unit'):
value = rhs.get_value()
unit = rhs.get_unit()
unit = unit_resolver('__mul__', (self, unit))
if (unit == NotImplemented):
return NotImplemented
return TaggedValue(value, unit)
def __rmul__(self, lhs):
return self*lhs
def __array_wrap__(self, array, context):
return TaggedValue(array, self)
def __array__(self, t=None, context=None):
ret = np.array([1])
if t is not None:
return ret.astype(t)
else:
return ret
def add_conversion_factor(self, unit, factor):
def convert(x):
return x*factor
self.conversions[unit] = convert
def add_conversion_fn(self, unit, fn):
self.conversions[unit] = fn
def get_conversion_fn(self, unit):
return self.conversions[unit]
def convert_value_to(self, value, unit):
#print 'convert value to: value ="%s", unit="%s"'%(value, type(unit)), self.conversions
conversion_fn = self.conversions[unit]
ret = conversion_fn(value)
return ret
def get_unit(self):
return self
class UnitResolver(object):
def addition_rule(self, units):
for unit_1, unit_2 in zip(units[:-1], units[1:]):
if (unit_1 != unit_2):
return NotImplemented
return units[0]
def multiplication_rule(self, units):
non_null = [u for u in units if u]
if (len(non_null) > 1):
return NotImplemented
return non_null[0]
op_dict = {
'__mul__':multiplication_rule,
'__rmul__':multiplication_rule,
'__add__':addition_rule,
'__radd__':addition_rule,
'__sub__':addition_rule,
'__rsub__':addition_rule,
}
def __call__(self, operation, units):
if (operation not in self.op_dict):
return NotImplemented
return self.op_dict[operation](self, units)
unit_resolver = UnitResolver()
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x:1./x)
secs.add_conversion_factor(minutes, 1/60.0)
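# A brief usage sketch (values shown are only illustrative): with the
# conversions registered above, tagged values convert explicitly, e.g.
#   inch(3).convert_to(cm).get_value()          -> 7.62
#   secs(120).convert_to(minutes).get_value()   -> 2.0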
# radians formatting
def rad_fn(x,pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n/2,)
else:
return r'$%s\pi/2$' % (n,)
class BasicUnitConverter(units.ConversionInterface):
@staticmethod
def axisinfo(unit, axis):
'return AxisInfo instance for x and unit'
if unit==radians:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.fullname,
)
elif unit==degrees:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'),
label=unit.fullname,
)
elif unit is not None:
if hasattr(unit, 'fullname'):
return units.AxisInfo(label=unit.fullname)
elif hasattr(unit, 'unit'):
return units.AxisInfo(label=unit.unit.fullname)
return None
@staticmethod
def convert(val, unit, axis):
if units.ConversionInterface.is_numlike(val):
return val
#print 'convert checking iterable'
if iterable(val):
return [thisval.convert_to(unit).get_value() for thisval in val]
else:
return val.convert_to(unit).get_value()
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
if iterable(x):
for thisx in x:
return thisx.unit
return x.unit
def cos( x ):
if ( iterable(x) ):
result = []
for val in x:
result.append( math.cos( val.convert_to( radians ).get_value() ) )
return result
else:
return math.cos( x.convert_to( radians ).get_value() )
basicConverter = BasicUnitConverter()
units.registry[BasicUnit] = basicConverter
units.registry[TaggedValue] = basicConverter
| gpl-2.0 |
hsiaoyi0504/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
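        # (Sketch of why the identity holds) tanh(x/2) = (e**x - 1)/(e**x + 1),
        # so (1 + tanh(x/2))/2 = e**x/(e**x + 1) = 1/(1 + e**-x), i.e. the
        # logistic sigmoid, while the bounded tanh avoids overflow for large |x|.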
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
rockyzhengwu/mlpractice | word2vec/tf_word2vec.py | 1 | 9346 | #!/usr/bin/env python
# -*-coding=utf-8-*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
# filename = maybe_download('text8.zip', 31344016)
filename = maybe_download('text8.zip', 33182194)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
  # Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
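# (Hedged note) With skip_window=1 and num_skips=2, every center word is
# paired with both of its immediate neighbors, which is what the sample
# printout below illustrates.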
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
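    # (Hedged note) NCE approximates the full softmax over the vocabulary by
    # training the model to tell the true context word apart from num_sampled
    # randomly drawn "noise" words, which keeps each training step cheap.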
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
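  # (Hedged note) Because the embeddings are L2-normalized above, this matrix
  # product of the validation vectors with all embeddings is exactly their
  # cosine similarity, which the nearest-neighbor printout below relies on.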
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
| mit |
sinhrks/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
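    # (Hedged note) np.c_ stacks the flattened grid coordinates into an
    # (n_points, 2) array, so every mesh point is classified in a single call.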
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
jampp/airflow | airflow/www/app.py | 3 | 67267 | from __future__ import print_function
from __future__ import division
from builtins import str
from past.utils import old_div
import copy
from datetime import datetime, timedelta
import dateutil.parser
from functools import wraps
import inspect
import json
import logging
import os
import socket
import sys
from flask._compat import PY2
from flask import (
Flask, url_for, Markup, Blueprint, redirect,
flash, Response, render_template)
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.form import DateTimePickerWidget
from flask.ext.admin import base
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.cache import Cache
from flask import request
import sqlalchemy as sqla
from wtforms import (
widgets,
Form, DateTimeField, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import chartkick
import jinja2
import markdown
from sqlalchemy import or_
import airflow
from airflow import jobs, login, models, settings, utils
from airflow.configuration import conf
from airflow.models import State
from airflow.settings import Session
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
from airflow import default_login as login
if conf.getboolean('webserver', 'AUTHENTICATE'):
try:
# Environment specific login
import airflow_login as login
except ImportError:
logging.error(
"authenticate is set to True in airflow.cfg, "
"but airflow_login failed to import")
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
if AUTHENTICATE is False:
login_required = lambda x: x
class VisiblePasswordInput(widgets.PasswordInput):
def __init__(self, hide_value=False):
self.hide_value = hide_value
class VisiblePasswordField(PasswordField):
widget = VisiblePasswordInput()
def superuser_required(f):
'''
Decorator for views requiring superuser access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.is_superuser())
):
return f(*args, **kwargs)
else:
flash("This page requires superuser privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: pygment_html_render(x, lexers.BashLexer),
'hql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'sql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'doc': lambda x: pygment_html_render(x, lexers.TextLexer),
'doc_json': lambda x: pygment_html_render(x, lexers.JsonLexer),
'doc_rst': lambda x: pygment_html_render(x, lexers.RstLexer),
'doc_yaml': lambda x: pygment_html_render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: pygment_html_render(
inspect.getsource(x), lexers.PythonLexer),
}
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
utils.pessimistic_connection_handling()
app = Flask(__name__)
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = conf.get('webserver', 'SECRET_KEY')
login.login_manager.init_app(app)
cache = Cache(
app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
# Init for chartkick, the python wrapper for highcharts
ck = Blueprint(
'ck_page', __name__,
static_folder=chartkick.js(), static_url_path='/static')
app.register_blueprint(ck, url_prefix='/ck')
app.jinja_env.add_extension("chartkick.ext.charts")
@app.context_processor
def jinja_globals():
return {
'hostname': socket.gethostname(),
}
class DateTimeForm(Form):
# Date filter form needed for gantt and graph view
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
class GraphForm(Form):
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
@app.route('/')
def index():
return redirect(url_for('admin.index'))
@app.route('/health')
def health():
""" We can add an array of tests here to check the server's health """
content = Markup(markdown.markdown("The server is healthy!"))
return content
@app.teardown_appcontext
def shutdown_session(exception=None):
settings.Session.remove()
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
admin = Admin(
app,
name="Airflow",
index_view=HomeView(name="DAGs"),
template_mode='bootstrap3')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).all()[0]
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return Response(
response=json.dumps(
payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(k, df[col][k])
for k in df[col].keys()
if not np.isnan(df[col][k])]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return Response(
response=json.dumps(payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.dag_id].append(d)
return Response(
response=json.dumps(payload, indent=4),
status=200, mimetype="application/json")
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@app.errorhandler(404)
def circles(self):
return render_template('airflow/circles.html'), 404
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {k: v for k, v in request.headers}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
return Response(
response=json.dumps(d, indent=4),
status=200, mimetype="application/json")
@expose('/login')
def login(self):
return login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "/{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = BASE_LOG_FOLDER + log_relative
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
except:
log = "Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = (
"http://{host}:{WORKER_LOG_SERVER_PORT}/log"
"{log_relative}").format(**locals())
log += "Log file isn't local.\n"
log += "Fetching here: {url}\n".format(**locals())
try:
import requests
log += requests.get(url).text
except:
log += "Failed to fetch log file.".format(**locals())
session.commit()
session.close()
log = log.decode('utf-8') if PY2 else log
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/action')
@login_required
def action(self):
action = request.args.get('action')
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
if action == "run":
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
elif action == 'clear':
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
elif action == 'success':
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
if downstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=False)]
if upstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=True)]
TI = models.TaskInstance
tis = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date == execution_date,
TI.task_id.in_(task_ids)).all()
if confirmed:
updated_task_ids = []
for ti in tis:
updated_task_ids.append(ti.task_id)
ti.state = State.SUCCESS
session.commit()
to_insert = list(set(task_ids) - set(updated_task_ids))
for task_id in to_insert:
ti = TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(task_ids)))
return redirect(origin)
else:
if not task_ids:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id in task_ids:
tis.append(TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
if not base_date:
base_date = datetime.now()
else:
base_date = dateutil.parser.parse(base_date)
base_date = utils.round_time(base_date, dag.schedule_interval)
start_date = dag.start_date
if not start_date and 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
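        # (Hedged note) The block below snaps base_date onto the DAG's schedule
        # grid anchored at start_date, so the tree columns line up with actual
        # schedule intervals.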
if start_date:
difference = base_date - start_date
offset = timedelta(seconds=int(difference.total_seconds() % dag.schedule_interval.total_seconds()))
base_date -= offset
base_date -= timedelta(microseconds=base_date.microsecond)
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
from_date = (base_date - (num_runs * dag.schedule_interval))
dates = utils.date_range(
from_date, base_date, dag.schedule_interval)
task_instances = {}
for ti in dag.get_task_instances(session, from_date):
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = []
def recurse_nodes(task):
children = [recurse_nodes(t) for t in task.upstream_list]
            # D3 tree uses children vs _children to define what is
            # expanded or not. The following block makes it such that
            # repeated nodes are collapsed by default (see the
            # _example_tree_node sketch after this view method).
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
return {
'name': task.task_id,
'instances': [
utils.alchemy_to_dict(
task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
if len(dag.roots) > 1:
# d3 likes a single root
data = {
'name': 'root',
'instances': [],
'children': [recurse_nodes(t) for t in dag.roots]
}
elif len(dag.roots) == 1:
data = recurse_nodes(dag.roots[0])
else:
flash("No tasks found.", "error")
data = []
data = json.dumps(data, indent=4, default=utils.json_ser)
session.commit()
session.close()
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
dag=dag, data=data, blur=blur)
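    # Illustrative sketch, not used anywhere in this view: the node shape that
    # recurse_nodes() in tree() above produces for the D3 tree, shown here
    # with made-up values for a task that has no upstream dependencies.
    # Repeated nodes keep their children under '_children' instead of
    # 'children' so D3 renders them collapsed by default.
    _example_tree_node = {
        'name': 'example_task',
        'instances': [],
        'children': [],
        'num_dep': 0,
        'operator': 'BashOperator',
        'retries': 0,
        'owner': 'airflow',
        'start_date': None,
        'end_date': None,
        'depends_on_past': False,
        'ui_color': '#fff',
    }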
@expose('/graph')
@login_required
@wwwutils.gzipped
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = GraphForm(data={'execution_date': dttm, 'arrange': arrange})
task_instances = {
ti.task_id: utils.alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)
}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/landing_times')
@login_required
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.end_date:
data.append([
ti.execution_date.isoformat(), old_div((
ti.end_date - (
ti.execution_date + task.schedule_interval)
).total_seconds(),(60*60))
])
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/paused')
@login_required
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
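        # Note that the toggle is inverted here: an 'is_paused' argument of
        # 'false' pauses the DAG, any other value unpauses it.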
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
admin.add_view(Airflow(name='DAGs'))
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes="table table-bordered table-striped no-wrap",
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
admin.add_view(QueryView(name='Ad Hoc Query', category="Data Profiling"))
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
def log_link(v, c, m, p):
url = url_for(
'airflow.log',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
'<a href="{url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_f(v, c, m, p):
color = State.color(m.state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{m.state}</span>'.format(**locals()))
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
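    # Drop the 'YYYY-' prefix when the value falls in the current year, to
    # keep the rendered column compact.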
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
mv = JobModelView(jobs.BaseJob, Session, name="Jobs", category="Browse")
admin.add_view(mv)
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
mv = LogModelView(
models.Log, Session, name="Logs", category="Browse")
admin.add_view(mv)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool')
named_filter_urls = True
column_formatters = dict(
log=log_link, task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
column_list = (
'state', 'dag_id', 'task_id', 'execution_date',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'log')
can_delete = True
page_size = 500
mv = TaskInstanceModelView(
models.TaskInstance, Session, name="Task Instances", category="Browse")
admin.add_view(mv)
mv = DagModelView(
models.DagModel, Session, name=None)
admin.add_view(mv)
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port')
form_overrides = dict(_password=VisiblePasswordField)
form_widget_args = {
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: these form elements get rendered and their
    # results are stored in the extra field as JSON. All of them need to be
    # prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file (see the _example_jdbc_extra
    # sketch after this class).
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
}
form_choices = {
'conn_type': [
('ftp', 'FTP',),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('mssql', 'Microsoft SQL Server'),
]
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def is_secure(self):
"""
        Used by the Connection list view to work out whether passwords can be
        encrypted, i.e. whether the cryptography package is installed and a
        fernet_key is configured.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
mv = ConnectionModelView(
models.Connection, Session,
name="Connections", category="Admin")
admin.add_view(mv)
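# Illustrative sketch with made-up values, not executed anywhere: for a
# connection of type 'jdbc', ConnectionModelView.on_model_change above packs
# the extra__jdbc__* form fields into the connection's 'extra' column as
# JSON, roughly like this.
def _example_jdbc_extra():
    formdata = {
        'conn_type': 'jdbc',
        'extra__jdbc__drv_path': '/usr/share/java/driver.jar',
        'extra__jdbc__drv_clsname': 'org.example.Driver',
    }
    extra_keys = ('extra__jdbc__drv_path', 'extra__jdbc__drv_clsname')
    return json.dumps(
        {key: formdata[key] for key in extra_keys if key in formdata})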
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
mv = UserModelView(models.User, Session, name="Users", category="Admin")
admin.add_view(mv)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
from airflow import configuration
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(configuration.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
admin.add_view(ConfigurationView(name='Configuration', category="Admin"))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if AUTHENTICATE and not model.user_id and current_user:
model.user_id = current_user.id
model.last_modified = datetime.now()
mv = ChartModelView(
models.Chart, Session,
name="Charts", category="Data Profiling")
admin.add_view(mv)
admin.add_link(
base.MenuLink(
category='Docs',
name='Documentation',
url='http://pythonhosted.org/airflow/'))
admin.add_link(
base.MenuLink(
category='Docs',
name='Github',
url='https://github.com/airbnb/airflow'))
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
mv = KnowEventView(
models.KnownEvent, Session, name="Known Events", category="Data Profiling")
admin.add_view(mv)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
column_list = ('key',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'val': {
'rows': 20,
}
}
mv = VariableView(
models.Variable, Session, name="Variables", category="Admin")
admin.add_view(mv)
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
mv = PoolModelView(models.Pool, Session, name="Pools", category="Admin")
admin.add_view(mv)
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
mv = SlaMissModelView(
models.SlaMiss, Session, name="SLA Misses", category="Browse")
admin.add_view(mv)
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import (
admin_views, flask_blueprints, menu_links)
for v in admin_views:
admin.add_view(v)
for bp in flask_blueprints:
print(bp)
app.register_blueprint(bp)
for ml in menu_links:
admin.add_link(ml)
integrate_plugins()
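# Illustrative sketch, not called anywhere: integrate_plugins() above simply
# walks the admin_views, flask_blueprints and menu_links lists collected by
# airflow.plugins_manager and registers each entry on the app. Registering a
# single extra menu link by hand (category and URL made up) would look like
# this.
def _example_add_menu_link():
    admin.add_link(
        base.MenuLink(
            category='Docs',
            name='Example Link',
            url='http://example.com/docs'))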
| apache-2.0 |
MSHallOpenSoft/plotter | GUI_code.py | 1 | 78812 | #!/usr/bin/env python
import mayaviPlot
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtGui import *
from functions import Ui_DockWidget_2
#from plottingEquation_3d_explicit import MplPlot3dCanvas
from imp_plottingEquation import MplPlot2dCanvas
from PyQt4.QtCore import Qt, SIGNAL
from function_2 import Ui_DockWidget
import numpy as np
#import matplotlib.pyplot as plotter
from PyQt4.QtCore import pyqtSlot,SIGNAL,SLOT
i=1
import sys, random
from Thesidetab import mainFrame
from Thesidetab import tableCon
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class SplashScreen(QtGui.QWidget):
def __init__(self, pixmap):
QtGui.QWidget.__init__(self)
self._pixmap = pixmap
        self._message = QtCore.QString()
        self._color = QtCore.Qt.black
self._alignment = QtCore.Qt.AlignLeft
self.setWindowFlags(QtCore.Qt.FramelessWindowHint |
QtCore.Qt.WindowStaysOnTopHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setFixedSize(self._pixmap.size())
self.setMask(self._pixmap.mask())
def clearMessage(self):
self._message.clear()
self.repaint()
def showMessage(self, message, alignment=QtCore.Qt.AlignLeft,
                    color=QtCore.Qt.black):
self._message = QtCore.QString(message)
self._alignment = alignment
self._color = color
self.repaint()
def paintEvent(self, event):
textbox = QtCore.QRect(self.rect())
textbox.setRect(textbox.x() + 5, textbox.y() + 5,
textbox.width() - 10, textbox.height() - 10)
painter = QtGui.QPainter(self)
painter.drawPixmap(self.rect(), self._pixmap)
painter.setPen(QtGui.QColor(self._color))
painter.drawText(textbox, self._alignment, self._message)
def mousePressEvent(self, event):
self.hide()
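# Illustrative usage sketch, not wired into the application's startup:
# SplashScreen above wraps a pixmap in a frameless, translucent, always-on-top
# widget. A caller would build it from a QPixmap, show it, optionally draw a
# status message, and hide it once the main window is ready. The pixmap path
# and message are made up.
def _splash_screen_demo(pixmap_path):
    pixmap = QtGui.QPixmap(pixmap_path)
    splash = SplashScreen(pixmap)
    splash.show()
    splash.showMessage("Loading plotter...",
                       alignment=QtCore.Qt.AlignBottom,
                       color=QtCore.Qt.white)
    return splash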
class Ui_MainWindow(QtGui.QMainWindow):
totalTabs=0
def __init__(self,parent):
QtGui.QWidget.__init__(self,parent)
self.parent=parent
self.setupUi(self)
QtGui.QShortcut(QtGui.QKeySequence("Esc"), self, self.showAll)
self.expression_list=[]
self.tabIdentifier=Ui_MainWindow.totalTabs
Ui_MainWindow.totalTabs+=1
def addNewEquationEditor(self,layout,spacer):
n = layout.count()
layout.removeItem(layout.itemAt(n-1))
dockWidgetContents = Exp_Form(self)
self.expression_list.append(dockWidgetContents)
layout.addWidget(dockWidgetContents)
layout.addItem(spacer)
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1396, 727)
MainWindow.setStyleSheet(_fromUtf8("\n"
"QFrame{\n"
"border:1px solid rgb(0, 0, 0);\n"
"border-radius:5px;\n"
"}\n"
"QHeaderView::section {\n"
" background-color:rgb(100, 100, 100);\n"
" color: rgb(200, 200, 200);\n"
"}\n"
"QStatusBar{ \n"
"background:qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0.33, stop:0 rgba(255, 255, 255, 255), stop:0.125 rgba(155, 174, 198, 255), stop:0.318182 rgba(104, 117, 133, 255), stop:0.534091 rgba(65, 73, 83, 255), stop:0.875 rgba(42, 47, 54, 255)); }\n"
" QMainWindow{\n"
" border:none; background-color:rgb(52, 52, 52); text-align: center; }\n"
" QGroupBox{ \n"
"background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:0.483136, y2:0.466, stop:0 rgba(219, 219, 219, 255), stop:1 rgba(255, 255, 255, 255)); }\n"
" QTabWidget{\n"
" background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:0.483136, y2:0.466, stop:0 rgba(219, 219, 219, 255), stop:1 rgba(255, 255, 255, 255)); } \n"
"QDockWidget{ background-color:#737373; border:none; padding:0px; } QSlider::groove:horizontal { background:red; height: 15px; position: absolute; left: 4px; right: 4px; } \n"
"QSlider::handle:horizontal {\n"
" height:20px; width: 10px; background: qlineargradient(spread:pad, x1:0, y1:0.477, x2:0, y2:0, stop:0.125 rgba(42, 47, 54, 255), stop:0.465909 rgba(65, 73, 83, 255), stop:0.681818 rgba(104, 117, 133, 255), stop:0.875 rgba(155, 174, 198, 255), stop:1 rgba(255, 255, 255, 255)); margin: -4px; } \n"
"QSlider::handle:hover:horizontal { height:20px; width: 10px; background:qlineargradient(spread:pad, x1:0, y1:0.477, x2:0, y2:0, stop:0.125 rgba(91, 95, 100, 255), stop:0.465909 rgba(122, 132, 146, 255), stop:0.681818 rgba(141, 153, 167, 255), stop:0.875 rgba(181, 195, 212, 255), stop:1 rgba(255, 255, 255, 255)); margin: -4px; }\n"
" QSlider::add-page:horizontal { background:rgb(170, 170, 170); }\n"
" QSlider::sub-page:horizontal { background: rgb(100, 100, 100) ; }\n"
" QToolButton{ position: relative; border: none; outline:none; color: black; padding: 4px; border-radius: 2px; font-size: 22px; }\n"
" QToolButton:hover:!pressed{ position: relative; border: none; outline:none; color: white; border-radius: 2px; font-size: 22px;padding: 0px; }\n"
" QPushButton{ position: relative; border:none; outline:none; background-color:qlineargradient(spread:pad, x1:0, y1:0.164, x2:0, y2:0, stop:0.125 rgba(36, 41, 47, 255), stop:0.465909 rgba(52, 59, 67, 255), stop:0.681818 rgba(80, 91, 103, 255), stop:0.875 rgba(117, 132, 150, 255), stop:1 rgba(186, 186, 186, 255)); color: rgb(170, 170, 170); padding: 6px 20px; border-radius: 2px; font-size: 20px; } \n"
"QPushButton:hover:!pressed{ position: relative; border: none; outline:none; background-color:rgb(60, 69, 79); color: white; padding: 6px 20px; border-radius: 2px; font-size:20px; } \n"
"QComboBox { border: none; padding: 1px 18px 1px 3px; } QComboBox, QComboBox:drop-down { background:qlineargradient(spread:pad, x1:0, y1:0.097, x2:0, y2:0, stop:0 rgba(100, 100, 100, 255), stop:0.892045 rgba(149, 149, 149, 255));color: rgb(200, 200, 200); } \n"
"\n"
"\n"
"QComboBox:on, QComboBox:drop-down:on { background:qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0.869318, stop:0.107955 rgba(149, 149, 149, 255), stop:1 rgba(100, 100, 100, 255));color: rgb(200, 200, 200); }\n"
" QComboBox:on { padding-top: 3px; padding-left: 4px; } \n"
"QComboBox::drop-down { subcontrol-origin: padding; subcontrol-position: top right; width: 15px; border-left-width: 1px; border-left-color: darkgray; border-left-style: solid; }\n"
" QComboBox::down-arrow { image:url(:/arrow/Icons/arrow-new.png); } QComboBox::down-arrow:on { top: 1px; left: 1px; }\n"
" QMenu { background-color: rgb(52, 52, 52); border: none; } \n"
"QMenu::item { background-color: transparent; }\n"
" QMenu::item:selected { background-color:rgb(100, 100, 100); } \n"
"QMenuBar { background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 #DBDBDB, stop:1 rgba(255, 255, 255, 255)) }\n"
" QMenuBar::item { spacing: 3px; padding: 1px 4px; background: transparent; border-radius: 2px; } \n"
"QMenuBar::item:selected { background:#737373; } \n"
"QMenuBar::item:pressed { background: #414953; }\n"
" QTableWidget{ background:rgb(25, 25, 25); border:none; color:white; border: 1px solid white; } \n"
"QTextEdit{\n"
" background:rgb(25, 25, 25);\n"
"color:rgb(255, 255, 255);\n"
" } \n"
"QScrollBar:horizontal { border: none; background: rgb(100, 100, 100); height: 15px; margin: 0px 20px 0px 20px; } \n"
"QScrollBar::handle:horizontal { background:qlineargradient(spread:pad, x1:0, y1:0.164, x2:0, y2:0, stop:0.125 rgba(36, 41, 47, 255), stop:0.465909 rgba(52, 59, 67, 255), stop:0.681818 rgba(80, 91, 103, 255), stop:0.875 rgba(117, 132, 150, 255), stop:1 rgba(186, 186, 186, 255)); min-width: 20px; } QScrollBar::handle:horizontal:hover { background:qlineargradient(spread:pad, x1:0, y1:0.164, x2:0, y2:0, stop:0.125 rgba(47, 47, 47, 255), stop:0.465909 rgba(67, 67, 67, 255), stop:0.681818 rgba(103, 103, 103, 255), stop:0.875 rgba(150, 150, 150, 255), stop:1 rgba(186, 186, 186, 255)); min-width: 20px; }\n"
" QScrollBar::add-line:horizontal { border: none; background:#DBDBDB; width: 20px; subcontrol-position: right; subcontrol-origin: margin; }\n"
" QScrollBar::sub-line:horizontal { border:none; background:#DBDBDB; width: 20px; subcontrol-position: left; subcontrol-origin: margin; } \n"
"QScrollBar::add-line:horizontal:hover:!pressed { border: none; background: qlineargradient(spread:pad, x1:0, y1:0.164, x2:0, y2:0, stop:0.125 rgba(36, 41, 47, 255), stop:0.465909 rgba(52, 59, 67, 255), stop:0.681818 rgba(80, 91, 103, 255), stop:0.875 rgba(117, 132, 150, 255), stop:1 rgba(186, 186, 186, 255)); width: 20px; subcontrol-position: right; subcontrol-origin: margin; } \n"
"QScrollBar::sub-line:horizontal:hover:!pressed { border:none; background: qlineargradient(spread:pad, x1:0, y1:0.164, x2:0, y2:0, stop:0.125 rgba(36, 41, 47, 255), stop:0.465909 rgba(52, 59, 67, 255), stop:0.681818 rgba(80, 91, 103, 255), stop:0.875 rgba(117, 132, 150, 255), stop:1 rgba(186, 186, 186, 255)); width: 20px; subcontrol-position: left; subcontrol-origin: margin; } \n"
"QScrollBar::left-arrow:horizontal{ image: url(:/arrow/Icons/left-arrow.png); } QScrollBar::right-arrow:horizontal{ image: url(:/arrow/Icons/right-arrow.png); } \n"
"QScrollBar:vertical { border: none; background: rgb(100, 100, 100); width: 15px; margin: 20px 0px 20px 0px; } \n"
"QScrollBar::handle:vertical { background:qlineargradient(spread:pad, x1:0.136, y1:0, x2:0, y2:0, stop:0.125 rgba(36, 41, 47, 255), stop:0.465909 rgba(52, 59, 67, 255), stop:0.681818 rgba(80, 91, 103, 255), stop:0.875 rgba(117, 132, 150, 255), stop:1 rgba(186, 186, 186, 255)); min-height: 20px; } QScrollBar::handle:vertical:hover { background:qlineargradient(spread:pad, x1:0.136, y1:0, x2:0, y2:0, stop:0.125 rgba(47, 47, 47, 255), stop:0.465909 rgba(67, 67, 67, 255), stop:0.681818 rgba(103, 103, 103, 255), stop:0.875 rgba(150, 150, 150, 255), stop:1 rgba(186, 186, 186, 255)); min-height: 15px; } \n"
"QScrollBar::add-line:vertical { border: none; background:#DBDBDB; height: 20px; subcontrol-position: bottom; subcontrol-origin: margin; } QScrollBar::sub-line:vertical { border:none; background:#DBDBDB; height: 20px; subcontrol-position: top; subcontrol-origin: margin; } \n"
"QScrollBar::add-line:vertical:hover:!pressed { border: none; background: qlineargradient(spread:pad, x1:0.136, y1:0, x2:0, y2:0, stop:0.125 rgba(36, 41, 47, 255), stop:0.465909 rgba(52, 59, 67, 255), stop:0.681818 rgba(80, 91, 103, 255), stop:0.875 rgba(117, 132, 150, 255), stop:1 rgba(186, 186, 186, 255)); height: 20px; subcontrol-position:bottom; subcontrol-origin: margin; }\n"
" QScrollBar::sub-line:vertical:hover:!pressed { border:none; background: qlineargradient(spread:pad, x1:0.136, y1:0, x2:0, y2:0, stop:0.125 rgba(36, 41, 47, 255), stop:0.465909 rgba(52, 59, 67, 255), stop:0.681818 rgba(80, 91, 103, 255), stop:0.875 rgba(117, 132, 150, 255), stop:1 rgba(186, 186, 186, 255)); height: 20px; subcontrol-position:top; subcontrol-origin: margin; } \n"
"QScrollBar::up-arrow:vertical{ image: url(:/arrow/Icons/up-arrow.png); } QScrollBar::down-arrow:vertical{ image: url(:/arrow/Icons/down-arrow.png); \n"
"}\n"))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.frame_2 = QtGui.QFrame(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_2.sizePolicy().hasHeightForWidth())
self.frame_2.setSizePolicy(sizePolicy)
self.frame_2.setMinimumSize(QtCore.QSize(20, 0))
self.frame_2.setStyleSheet(_fromUtf8(""))
self.frame_2.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtGui.QFrame.Raised)
self.frame_2.setObjectName(_fromUtf8("frame_2"))
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.frame_2)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.horizontalLayout_4.setMargin(0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setContentsMargins(0,0,0,0)
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.pushButton = QtGui.QPushButton(self.frame_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setMaximumSize(QtCore.QSize(20, 50))
self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/double-right.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton.setIcon(icon)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.verticalLayout_5.addWidget(self.pushButton)
self.horizontalLayout_4.addLayout(self.verticalLayout_5)
self.horizontalLayout_3.addWidget(self.frame_2)
self.frame = tableCon.TableContents(self.centralwidget,self.frame_2,self.pushButton)
#self.frame.hide()
# sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
# sizePolicy.setHorizontalStretch(0)
# sizePolicy.setVerticalStretch(0)
# sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
# self.frame.setSizePolicy(sizePolicy)
# self.frame.setMaximumSize(QtCore.QSize(320, 16777215))
# self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
# self.frame.setFrameShadow(QtGui.QFrame.Raised)
# self.frame.setObjectName(_fromUtf8("frame"))
# self.verticalLayout_3 = QtGui.QVBoxLayout(self.frame)
# self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
# self.horizontalLayout_5 = QtGui.QHBoxLayout()
# self.horizontalLayout_5.setSpacing(6)
# self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
# self.pushButton_3 = QtGui.QPushButton(self.frame)
# self.pushButton_3.setEnabled(True)
# sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
# sizePolicy.setHorizontalStretch(0)
# sizePolicy.setVerticalStretch(0)
# sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())
# self.pushButton_3.setSizePolicy(sizePolicy)
# self.pushButton_3.setMinimumSize(QtCore.QSize(50, 0))
# self.pushButton_3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
# self.pushButton_3.setStyleSheet(_fromUtf8(""))
# self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
# self.horizontalLayout_5.addWidget(self.pushButton_3)
# self.toolButton_7 = QtGui.QToolButton(self.frame)
# self.toolButton_7.setMinimumSize(QtCore.QSize(10, 0))
# self.toolButton_7.setMaximumSize(QtCore.QSize(35, 16777215))
# self.toolButton_7.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
# self.toolButton_7.setStyleSheet(_fromUtf8(""))
# icon1 = QtGui.QIcon()
# icon1.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Add-New-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
# self.toolButton_7.setIcon(icon1)
# self.toolButton_7.setIconSize(QtCore.QSize(40, 30))
# self.toolButton_7.setObjectName(_fromUtf8("toolButton_7"))
# self.horizontalLayout_5.addWidget(self.toolButton_7)
# self.toolButton_9 = QtGui.QToolButton(self.frame)
# self.toolButton_9.setMinimumSize(QtCore.QSize(10, 0))
# self.toolButton_9.setMaximumSize(QtCore.QSize(35, 16777215))
# self.toolButton_9.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
# self.toolButton_9.setStyleSheet(_fromUtf8(""))
# icon2 = QtGui.QIcon()
# icon2.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Minus-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
# self.toolButton_9.setIcon(icon2)
# self.toolButton_9.setIconSize(QtCore.QSize(40, 30))
# self.toolButton_9.setObjectName(_fromUtf8("toolButton_9"))
# self.horizontalLayout_5.addWidget(self.toolButton_9)
# self.toolButton_8 = QtGui.QToolButton(self.frame)
# self.toolButton_8.setMinimumSize(QtCore.QSize(10, 0))
# self.toolButton_8.setMaximumSize(QtCore.QSize(35, 16777215))
# self.toolButton_8.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
# self.toolButton_8.setStyleSheet(_fromUtf8(""))
# icon3 = QtGui.QIcon()
# icon3.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Folder-Open-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
# self.toolButton_8.setIcon(icon3)
# self.toolButton_8.setIconSize(QtCore.QSize(40, 30))
# self.toolButton_8.setObjectName(_fromUtf8("toolButton_8"))
# self.horizontalLayout_5.addWidget(self.toolButton_8)
# self.toolButton_5 = QtGui.QToolButton(self.frame)
# self.toolButton_5.setMinimumSize(QtCore.QSize(10, 0))
# self.toolButton_5.setMaximumSize(QtCore.QSize(35, 16777215))
# self.toolButton_5.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
# self.toolButton_5.setStyleSheet(_fromUtf8(""))
# icon4 = QtGui.QIcon()
# icon4.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Save-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
# self.toolButton_5.setIcon(icon4)
# self.toolButton_5.setIconSize(QtCore.QSize(40, 30))
# self.toolButton_5.setObjectName(_fromUtf8("toolButton_5"))
# self.horizontalLayout_5.addWidget(self.toolButton_5)
# spacerItem = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
# self.horizontalLayout_5.addItem(spacerItem)
# self.verticalLayout_3.addLayout(self.horizontalLayout_5)
# self.tableWidget = QtGui.QTableWidget(self.frame)
# sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
# sizePolicy.setHorizontalStretch(0)
# sizePolicy.setVerticalStretch(0)
# sizePolicy.setHeightForWidth(self.tableWidget.sizePolicy().hasHeightForWidth())
# self.tableWidget.setSizePolicy(sizePolicy)
# self.tableWidget.setMinimumSize(QtCore.QSize(300, 0))
# self.tableWidget.setStyleSheet(_fromUtf8(""))
# self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
# self.tableWidget.setColumnCount(3)
# self.tableWidget.setRowCount(0)
# item = QtGui.QTableWidgetItem()
# self.tableWidget.setHorizontalHeaderItem(0, item)
# item = QtGui.QTableWidgetItem()
# self.tableWidget.setHorizontalHeaderItem(1, item)
# item = QtGui.QTableWidgetItem()
# self.tableWidget.setHorizontalHeaderItem(2, item)
# self.header=self.tableWidget.horizontalHeader();
# self.header.setResizeMode(QHeaderView.Stretch);
# self.verticalLayout_3.addWidget(self.tableWidget)
# self.pushButton_21 = QtGui.QPushButton(self.frame)
# sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
# sizePolicy.setHorizontalStretch(0)
# sizePolicy.setVerticalStretch(0)
# sizePolicy.setHeightForWidth(self.pushButton_21.sizePolicy().hasHeightForWidth())
# self.pushButton_21.setSizePolicy(sizePolicy)
# self.pushButton_21.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
# self.pushButton_21.setStyleSheet(_fromUtf8(""))
# self.pushButton_21.setObjectName(_fromUtf8("pushButton_21"))
# self.verticalLayout_3.addWidget(self.pushButton_21)
#remove above from code
self.horizontalLayout_3.addWidget(self.frame)
self.verticalLayout_6 = QtGui.QVBoxLayout()
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
'''self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.tabWidget.addTab(self.tab, "2D Graph")
self.tab.setVisible(False)'''
#self.tab_2 = QtGui.QWidget()
#self.tab_2.setObjectName(_fromUtf8("tab_2"))
contents_2=QtGui.QWidget(self.tabWidget)
layout_2= QtGui.QVBoxLayout(contents_2)
sc_2=MplPlot2dCanvas(self)
self.sc_2=sc_2
widget_2=QtGui.QWidget(self)
layout_2.addWidget(sc_2)
self.tabWidget.addTab(contents_2, "2D Graph")
contents = QtGui.QWidget(self.tabWidget)
layout = QtGui.QVBoxLayout(contents)
widget_1 = QtGui.QWidget(self)
self.mayavi_widget = mayaviPlot.MayaviQWidget(self)
layout.addWidget(self.mayavi_widget)
self.tabWidget.addTab(contents, "3D Graph")
self.verticalLayout_6.addWidget(self.tabWidget)
# self.wrewidget = QtGui.QWidget()
# self.wrewidget.setLayout(self.verticalLayout_6)
self.horizontalLayout_3.addLayout(self.verticalLayout_6)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1396, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuEdit = QtGui.QMenu(self.menubar)
self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
self.menuView = QtGui.QMenu(self.menubar)
self.menuView.setObjectName(_fromUtf8("menuView"))
self.menuAbout = QtGui.QMenu(self.menubar)
self.menuAbout.setObjectName(_fromUtf8("menuAbout"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.dockWidget = QtGui.QDockWidget(MainWindow)
self.dockWidget.setObjectName(_fromUtf8("dockWidget"))
self.dockWidget.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
#self.dockWidgetContents.SetName("plot1")
#self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
#self.dockWidget.setGeometry
self.dockWidgetContents = mainFrame.DockContents(self)
#self.dockWidgetContents.setStyleSheet("QWidget{ background-color:#737373; border:none; padding:0px; }")
self.dockWidget.setWidget(self.dockWidgetContents)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget)
self.dockWidget_3 = QtGui.QDockWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dockWidget_3.sizePolicy().hasHeightForWidth())
self.dockWidget_3.setSizePolicy(sizePolicy)
self.dockWidget_3.setMinimumSize(QtCore.QSize(489, 70))
self.dockWidget_3.setMaximumSize(QtCore.QSize(524287, 524287))
self.dockWidget_3.setObjectName(_fromUtf8("dockWidget_3"))
self.dockWidgetContents_3 = QtGui.QWidget()
self.dockWidgetContents_3.setObjectName(_fromUtf8("dockWidgetContents_3"))
self.horizontalLayout = QtGui.QHBoxLayout(self.dockWidgetContents_3)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setSpacing(0)
self.toolButton_17 = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton_17.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_17.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_17.setStyleSheet(_fromUtf8(""))
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Item-New-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_17.setIcon(icon5)
self.toolButton_17.setIconSize(QtCore.QSize(30, 30))
self.toolButton_17.setObjectName(_fromUtf8("toolButton_17"))
self.horizontalLayout.addWidget(self.toolButton_17)
self.toolButton_10 = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton_10.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_10.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_10.setStyleSheet(_fromUtf8(""))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Folder-Open-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_10.setIcon(icon3)
self.toolButton_10.setIconSize(QtCore.QSize(30, 30))
self.toolButton_10.setObjectName(_fromUtf8("toolButton_10"))
self.horizontalLayout.addWidget(self.toolButton_10)
self.toolButton_20 = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton_20.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_20.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_20.setStyleSheet(_fromUtf8(""))
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Save-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_20.setIcon(icon4)
self.toolButton_20.setIconSize(QtCore.QSize(30, 30))
self.toolButton_20.setObjectName(_fromUtf8("toolButton_20"))
self.horizontalLayout.addWidget(self.toolButton_20)
self.toolButton_18 = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton_18.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_18.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_18.setStyleSheet(_fromUtf8(""))
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Open-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_18.setIcon(icon6)
self.toolButton_18.setIconSize(QtCore.QSize(30, 30))
self.toolButton_18.setObjectName(_fromUtf8("toolButton_18"))
self.horizontalLayout.addWidget(self.toolButton_18)
'''self.line_4 = QtGui.QFrame(self.dockWidgetContents_3)
self.line_4.setMaximumSize(QtCore.QSize(16777215, 20))
self.line_4.setFrameShape(QtGui.QFrame.VLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName(_fromUtf8("line_4"))
self.horizontalLayout.addWidget(self.line_4)'''
'''self.toolButton_4 = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton_4.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_4.setStyleSheet(_fromUtf8(""))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Add-New-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_4.setIcon(icon1)
self.toolButton_4.setIconSize(QtCore.QSize(30, 30))
self.toolButton_4.setObjectName(_fromUtf8("toolButton_4"))
self.horizontalLayout.addWidget(self.toolButton_4)'''
'''self.toolButton_3 = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton_3.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_3.setStyleSheet(_fromUtf8(""))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Minus-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_3.setIcon(icon2)
self.toolButton_3.setIconSize(QtCore.QSize(30, 30))
self.toolButton_3.setObjectName(_fromUtf8("toolButton_3"))
self.horizontalLayout.addWidget(self.toolButton_3)'''
'''self.line_5 = QtGui.QFrame(self.dockWidgetContents_3)
self.line_5.setMaximumSize(QtCore.QSize(16777215, 20))
self.line_5.setFrameShape(QtGui.QFrame.VLine)
self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
self.line_5.setObjectName(_fromUtf8("line_5"))
self.horizontalLayout.addWidget(self.line_5)'''
'''self.checkBox = QtGui.QCheckBox(self.dockWidgetContents_3)
self.checkBox.setMaximumSize(QtCore.QSize(20, 25))
self.checkBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.checkBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.checkBox.setText(_fromUtf8(""))
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.horizontalLayout.addWidget(self.checkBox)'''
'''self.Example = QtGui.QToolButton(self.dockWidgetContents_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Example.sizePolicy().hasHeightForWidth())
self.Example.setSizePolicy(sizePolicy)
self.Example.setMaximumSize(QtCore.QSize(16777215, 25))
self.Example.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.Example.setStyleSheet(_fromUtf8("QToolButton{\n"
"font-size: 15px;\n"
"color:rgb(255, 255, 255);\n"
"}"))
self.Example.setIconSize(QtCore.QSize(24, 24))
self.Example.setObjectName(_fromUtf8("Example"))
self.horizontalLayout.addWidget(self.Example)'''
'''self.line_6 = QtGui.QFrame(self.dockWidgetContents_3)
self.line_6.setMaximumSize(QtCore.QSize(16777215, 20))
self.line_6.setFrameShape(QtGui.QFrame.VLine)
self.line_6.setFrameShadow(QtGui.QFrame.Sunken)
self.line_6.setObjectName(_fromUtf8("line_6"))
self.horizontalLayout.addWidget(self.line_6)'''
'''self.toolButton = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton.setStyleSheet(_fromUtf8(""))
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Board-Pin-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon7)
self.toolButton.setIconSize(QtCore.QSize(30, 30))
self.toolButton.setObjectName(_fromUtf8("toolButton"))
self.horizontalLayout.addWidget(self.toolButton)'''
self.toolButton_25 = QtGui.QToolButton(self.dockWidgetContents_3)
self.toolButton_25.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_25.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_25.setStyleSheet(_fromUtf8(""))
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Table-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_25.setIcon(icon8)
self.toolButton_25.setIconSize(QtCore.QSize(30, 30))
self.toolButton_25.setObjectName(_fromUtf8("toolButton_25"))
self.horizontalLayout.addWidget(self.toolButton_25)
'''self.line_8 = QtGui.QFrame(self.dockWidgetContents_3)
self.line_8.setMaximumSize(QtCore.QSize(16777215, 20))
self.line_8.setFrameShape(QtGui.QFrame.VLine)
self.line_8.setFrameShadow(QtGui.QFrame.Sunken)
self.line_8.setObjectName(_fromUtf8("line_8"))
self.horizontalLayout.addWidget(self.line_8)'''
self.dockWidget_3.setWidget(self.dockWidgetContents_3)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(4), self.dockWidget_3)
self.dockWidget_4 = QtGui.QDockWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dockWidget_4.sizePolicy().hasHeightForWidth())
self.dockWidget_4.setSizePolicy(sizePolicy)
self.dockWidget_4.setMinimumSize(QtCore.QSize(624, 70))
self.dockWidget_4.setMaximumSize(QtCore.QSize(524287, 70))
self.dockWidget_4.setObjectName(_fromUtf8("dockWidget_4"))
self.dockWidgetContents_4 = QtGui.QWidget()
self.dockWidgetContents_4.setObjectName(_fromUtf8("dockWidgetContents_4"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.dockWidgetContents_4)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.horizontalLayout_2.setMargin(0)
self.horizontalLayout_2.setSpacing(0)
self.line_7 = QtGui.QFrame(self.dockWidgetContents_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line_7.sizePolicy().hasHeightForWidth())
self.line_7.setSizePolicy(sizePolicy)
self.line_7.setMaximumSize(QtCore.QSize(16777215, 20))
self.line_7.setLineWidth(1)
self.line_7.setMidLineWidth(1)
self.line_7.setFrameShape(QtGui.QFrame.VLine)
self.line_7.setFrameShadow(QtGui.QFrame.Sunken)
self.line_7.setObjectName(_fromUtf8("line_7"))
self.horizontalLayout_2.addWidget(self.line_7)
self.toolButton_19 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_19.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_19.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_19.setStyleSheet(_fromUtf8(""))
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Keyboard-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_19.setIcon(icon9)
self.toolButton_19.setIconSize(QtCore.QSize(35, 35))
self.toolButton_19.setObjectName(_fromUtf8("toolButton_19"))
self.horizontalLayout_2.addWidget(self.toolButton_19)
'''self.toolButton_23 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_23.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_23.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_23.setStyleSheet(_fromUtf8(""))
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Printer-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_23.setIcon(icon10)
self.toolButton_23.setIconSize(QtCore.QSize(35, 35))
self.toolButton_23.setObjectName(_fromUtf8("toolButton_23"))
self.horizontalLayout_2.addWidget(self.toolButton_23)'''
'''self.toolButton_2 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_2.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_2.setIcon(icon4)
self.toolButton_2.setIconSize(QtCore.QSize(35, 35))
self.toolButton_2.setObjectName(_fromUtf8("toolButton_2"))
self.horizontalLayout_2.addWidget(self.toolButton_2)'''
self.toolButton_24 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_24.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_24.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_24.setStyleSheet(_fromUtf8(""))
self.toolButton_24.clicked.connect(self.saveImage)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Camera-02-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_24.setIcon(icon11)
self.toolButton_24.setIconSize(QtCore.QSize(35, 35))
self.toolButton_24.setObjectName(_fromUtf8("toolButton_24"))
self.horizontalLayout_2.addWidget(self.toolButton_24)
'''self.toolButton_22 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_22.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_22.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_22.setStyleSheet(_fromUtf8(""))
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Facebook-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_22.setIcon(icon12)
self.toolButton_22.setIconSize(QtCore.QSize(35, 35))
self.toolButton_22.setObjectName(_fromUtf8("toolButton_22"))
self.horizontalLayout_2.addWidget(self.toolButton_22)'''
self.line_3 = QtGui.QFrame(self.dockWidgetContents_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line_3.sizePolicy().hasHeightForWidth())
self.line_3.setSizePolicy(sizePolicy)
self.line_3.setMaximumSize(QtCore.QSize(16777215, 20))
self.line_3.setFrameShape(QtGui.QFrame.VLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.horizontalLayout_2.addWidget(self.line_3)
'''self.toolButton_21 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_21.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_21.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_21.setStyleSheet(_fromUtf8(""))
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Media-Play-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_21.setIcon(icon13)
self.toolButton_21.setIconSize(QtCore.QSize(35, 35))
self.toolButton_21.setObjectName(_fromUtf8("toolButton_21"))
self.horizontalLayout_2.addWidget(self.toolButton_21)'''
'''self.toolButton_16 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_16.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_16.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_16.setStyleSheet(_fromUtf8(""))
icon14 = QtGui.QIcon()
icon14.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Stop-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_16.setIcon(icon14)
self.toolButton_16.setIconSize(QtCore.QSize(35, 35))
self.toolButton_16.setObjectName(_fromUtf8("toolButton_16"))'''
#self.horizontalLayout_2.addWidget(self.toolButton_16)
self.line_2 = QtGui.QFrame(self.dockWidgetContents_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line_2.sizePolicy().hasHeightForWidth())
self.line_2.setSizePolicy(sizePolicy)
self.line_2.setMaximumSize(QtCore.QSize(16777215, 20))
self.line_2.setFrameShape(QtGui.QFrame.VLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.horizontalLayout_2.addWidget(self.line_2)
self.toolButton_15 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_15.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_15.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_15.setStyleSheet(_fromUtf8(""))
icon15 = QtGui.QIcon()
icon15.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Full-Screen-Expand-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_15.setIcon(icon15)
self.toolButton_15.setIconSize(QtCore.QSize(35, 35))
self.toolButton_15.setObjectName(_fromUtf8("toolButton_15"))
self.horizontalLayout_2.addWidget(self.toolButton_15)
'''self.toolButton_14 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_14.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_14.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_14.setStyleSheet(_fromUtf8(""))
icon16 = QtGui.QIcon()
icon16.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Full-Screen-Collapse-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_14.setIcon(icon16)
self.toolButton_14.setIconSize(QtCore.QSize(35, 35))
self.toolButton_14.setObjectName(_fromUtf8("toolButton_14"))
self.horizontalLayout_2.addWidget(self.toolButton_14)'''
self.line = QtGui.QFrame(self.dockWidgetContents_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line.sizePolicy().hasHeightForWidth())
self.line.setSizePolicy(sizePolicy)
self.line.setMaximumSize(QtCore.QSize(16777215, 20))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.horizontalLayout_2.addWidget(self.line)
'''self.toolButton_13 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_13.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_13.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_13.setStyleSheet(_fromUtf8(""))
icon17 = QtGui.QIcon()
icon17.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Magnifying-Glass-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_13.setIcon(icon17)
self.toolButton_13.setIconSize(QtCore.QSize(35, 35))
self.toolButton_13.setObjectName(_fromUtf8("toolButton_13"))
self.horizontalLayout_2.addWidget(self.toolButton_13)'''
'''self.toolButton_12 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_12.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_12.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_12.setStyleSheet(_fromUtf8(""))
icon18 = QtGui.QIcon()
icon18.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Zoom-In-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_12.setIcon(icon18)
self.toolButton_12.setIconSize(QtCore.QSize(35, 35))
self.toolButton_12.setObjectName(_fromUtf8("toolButton_12"))
self.horizontalLayout_2.addWidget(self.toolButton_12)'''
'''self.toolButton_11 = QtGui.QToolButton(self.dockWidgetContents_4)
self.toolButton_11.setMaximumSize(QtCore.QSize(16777215, 25))
self.toolButton_11.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.toolButton_11.setAutoFillBackground(False)
self.toolButton_11.setStyleSheet(_fromUtf8(""))
icon19 = QtGui.QIcon()
icon19.addPixmap(QtGui.QPixmap(_fromUtf8("Icons/Zoom-Out-48.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_11.setIcon(icon19)
self.toolButton_11.setIconSize(QtCore.QSize(35, 35))
self.toolButton_11.setObjectName(_fromUtf8("toolButton_11"))
self.horizontalLayout_2.addWidget(self.toolButton_11)'''
self.spacerItem = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(self.spacerItem)
self.dockWidget_4.setWidget(self.dockWidgetContents_4)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(4), self.dockWidget_4)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
self.pushButton.setVisible(False)
self.frame_2.setVisible(False)
self.pushButton.clicked.connect(self.show_2)
self.toolButton_19.clicked.connect(self.parent.showKeyboard)
# self.toolButton_8.clicked.connect(self.showFileChooser)
# self.toolButton_7.clicked.connect(self.addRowDataPoint)
# self.toolButton_9.clicked.connect(self.removeRowDataPoint)
# self.toolButton_5.clicked.connect(self.saveDataValuesToFile)
self.toolButton_15.clicked.connect(self.hideAll)
self.toolButton_25.clicked.connect(self.showTable)
self.toolButton_17.clicked.connect(self.parent.add_page)
#self.toolButton.clicked.connect(self.still)
self.action_1=self.dockWidget_3.toggleViewAction()
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def showTable(self):
if self.frame.isVisible()==True:
self.hide_2()
else:
self.show_2()
def saveImage(self,event):
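# Save the current tab's plot to an image file: tab 0 saves via the 2D plot object,
# tab 1 via the Mayavi 3D scene.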
#self.sc_2.plotobj.save('foo.pdf')
#self.mayavi_widget.visualization.scene.mlab.savefig('doo.pdf')
ind=self.tabWidget.currentIndex()
if ind==0:
filename = QFileDialog.getSaveFileName(self, "Save Output File", "", 'PNG (*.png);;JPG (*.jpg);;PDF (*.pdf);;EPS (*.eps)')
print(filename)
self.sc_2.plotobj.save(filename)
elif ind==1:
filename = QFileDialog.getSaveFileName(self, "Save Output File", "", 'PNG (*.png);;JPG (*.jpg);;PDF (*.pdf);;EPS (*.eps)')
print(filename)
self.mayavi_widget.visualization.scene.mlab.savefig(filename)
#print(self.tabWidget.currentIndex())
print("saving graph")
def hideAll(self):
self.dockWidget.hide()
self.dockWidget_4.hide()
self.dockWidget_3.hide()
self.frame.hide()
def showAll(self):
self.dockWidget.show()
self.dockWidget_4.show()
self.dockWidget_3.show()
self.frame.show()
def saveDataValuesToFile(self):
pname = ex.getTabName()
import csv
#savFile = open(pname+'.csv','w')
with open(pname+".csv",'w') as output:
writeHead = csv.writer(output,delimiter=',',lineterminator='\n')
for i in range (0,self.tableWidget.rowCount()):
row = list()
for j in range (0,3):
try :
item = self.tableWidget.item(i,j).text()
except Exception,e:
continue
#toInt = int(item)
print item
row.append(item)
#print row
writeHead.writerow(row)
#savFile.close()
def addRowDataPoint(self):
rowC = self.tableWidget.rowCount()
self.tableWidget.insertRow(rowC)
def removeRowDataPoint(self):
if(self.tableWidget.currentRow()==-1):
self.errorRemoveDataPoint()
else:
self.tableWidget.removeRow(self.tableWidget.currentRow())
self.tableWidget.setCurrentCell(-1,-1)
def errorRemoveDataPoint(self):
Dialog = QtGui.QDialog()
u = Ui_Dialog_2("Please select the data point to remove")
u.setupUi(Dialog)
Dialog.exec_()
# For Hand Cursor
def hand_cursor(self,widget):
widget.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
def showFileChooser(self):
fname = QtGui.QFileDialog.getOpenFileName(self,'Select File')
self.delimit=','
import csv
f=open(fname,'rt')
try:
reader = csv.reader(f)
num_rows = 0
for row in reader:
num_rows=num_rows+1
with open(fname,'r') as fil :
text = fil.read()
count_comma=0
for char in text:
if char==',':
count_comma=count_comma+1
if count_comma != (num_rows * 2) : # i.e. it is NOT a csv file
self.showSelectDelimiter()
self.tableWidget.setRowCount(num_rows)
## create items in all added
rowno_=0
f=open(fname,'rt')
reader=csv.reader(f,delimiter=self.delimit)
try:
for row in reader:
for col in range (0,3):
float(row[col])
item = QtGui.QTableWidgetItem(row[col])
self.tableWidget.setItem(rowno_,col,item)
rowno_=rowno_+1
self.tableWidget.setRowCount(rowno_)
except Exception, e:
self.showU=self.showInvalidValueError()
self.tableWidget.setRowCount(0)
finally:
f.close()
def showInvalidValueError(self):
Dialog = QtGui.QDialog()
u = Ui_Dialog_2('Cannot import values! Data invalid!')
u.setupUi(Dialog)
Dialog.exec_()
def showSelectDelimiter(self):
Dialog = QtGui.QDialog()
u = Ui_Dialog()
u.setupUi(Dialog)
dialg = StartDialog()
if dialg.exec_():
self.delimit = dialg.getDelim()
#self.showFileChooser()
def setCurrentTable(self,table5):
self.tableWidget = table5
self.verticalLayout_3.insertWidget(1, table5)
self.verticalLayout_3.takeAt(2)
def hide_2(self):
self.frame.hide()
self.frame_2.show()
self.pushButton.show()
self.parent.parent.actionTable.setChecked(False)
def show_2(self):
self.frame.show()
self.frame_2.hide()
self.pushButton.hide()
self.parent.parent.actionTable.setChecked(True)
def add_page(self):
#self.pages.append(self.create_page(self.create_new_page_button(),self.create_new_page_button_2()))
contents = QtGui.QWidget(self.tabWidget)
layout = QtGui.QVBoxLayout(contents)
# add other widgets to the contents layout here
# i.e. layout.addWidget(widget), etc
widget_1 = QtGui.QWidget(self)
#l = QtGui.QVBoxLayout(widget_1)
#textbox=customLineEdit(self)
sc = MayaviQWidget(widget_1)
#l.addWidget(sc)
#l.addWidget(textbox)
layout.addWidget(sc)
#layout.addWidget(self.create_new_page_button(),1,Qt.AlignTop)
#layout.addWidget(self.create_new_page_button_2(),15,Qt.AlignTop)
global i
i+=1
self.tabWidget.addTab( contents , 'Untitled'+str(i))
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Plot It", None))
# self.pushButton_3.setText(_translate("MainWindow", "Hide", None))
# self.toolButton_7.setToolTip(_translate("MainWindow", "Add", None))
# self.toolButton_7.setText(_translate("MainWindow", "...", None))
# self.toolButton_9.setToolTip(_translate("MainWindow", "Remove", None))
# self.toolButton_9.setText(_translate("MainWindow", "...", None))
# self.toolButton_8.setToolTip(_translate("MainWindow", "Import Coordinates", None))
# self.toolButton_8.setText(_translate("MainWindow", "...", None))
# self.toolButton_5.setToolTip(_translate("MainWindow", "Export Coordinates", None))
# self.toolButton_5.setText(_translate("MainWindow", "...", None))
# item = self.tableWidget.horizontalHeaderItem(0)
# item.setText(_translate("MainWindow", "x", None))
# item = self.tableWidget.horizontalHeaderItem(1)
# item.setText(_translate("MainWindow", "y", None))
# item = self.tableWidget.horizontalHeaderItem(2)
# item.setText(_translate("MainWindow", "z", None))
# self.pushButton_21.setText(_translate("MainWindow", "Redraw", None))
self.toolButton_17.setToolTip(_translate("MainWindow", "Create New", None))
self.toolButton_17.setText(_translate("MainWindow", "...", None))
self.toolButton_10.setToolTip(_translate("MainWindow", "Open Existing", None))
self.toolButton_10.setText(_translate("MainWindow", "...", None))
self.toolButton_20.setToolTip(_translate("MainWindow", "Save to Drive", None))
self.toolButton_20.setText(_translate("MainWindow", "...", None))
self.toolButton_18.setToolTip(_translate("MainWindow", "Load New", None))
self.toolButton_18.setText(_translate("MainWindow", "...", None))
#self.toolButton_4.setToolTip(_translate("MainWindow", "Add new Equation", None))
#self.toolButton_4.setText(_translate("MainWindow", "...", None))
#self.toolButton_3.setToolTip(_translate("MainWindow", "Remove this Equation", None))
#self.toolButton_3.setText(_translate("MainWindow", "...", None))
#self.checkBox.setToolTip(_translate("MainWindow", "Show on Graph", None))
#self.Example.setToolTip(_translate("MainWindow", "Illustrate with an Example", None))
#self.Example.setWhatsThis(_translate("MainWindow", "Example", None))
#self.Example.setText(_translate("MainWindow", "Example", None))
#self.toolButton.setToolTip(_translate("MainWindow", "Always on Top", None))
#self.toolButton.setText(_translate("MainWindow", "...", None))
self.toolButton_25.setToolTip(_translate("MainWindow", "Show/Hide Table", None))
self.toolButton_25.setText(_translate("MainWindow", "...", None))
self.toolButton_19.setToolTip(_translate("MainWindow", "Keyboard", None))
self.toolButton_19.setText(_translate("MainWindow", "...", None))
#self.toolButton_23.setToolTip(_translate("MainWindow", "Print graph", None))
#self.toolButton_23.setText(_translate("MainWindow", "...", None))
#self.toolButton_2.setToolTip(_translate("MainWindow", "Save Graph", None))
#self.toolButton_2.setText(_translate("MainWindow", "...", None))
self.toolButton_24.setToolTip(_translate("MainWindow", "Save Graph", None))
self.toolButton_24.setText(_translate("MainWindow", "...", None))
#self.toolButton_22.setToolTip(_translate("MainWindow", "Go to our FaceBook page", None))
#self.toolButton_22.setText(_translate("MainWindow", "...", None))
#self.toolButton_21.setToolTip(_translate("MainWindow", "Play", None))
#self.toolButton_21.setText(_translate("MainWindow", "...", None))
#self.toolButton_16.setToolTip(_translate("MainWindow", "Stop", None))
#self.toolButton_16.setText(_translate("MainWindow", "...", None))
self.toolButton_15.setToolTip(_translate("MainWindow", "FullScreen", None))
self.toolButton_15.setText(_translate("MainWindow", "...", None))
#self.toolButton_14.setToolTip(_translate("MainWindow", "Enable Anti-Aliasing", None))
#self.toolButton_14.setText(_translate("MainWindow", "...", None))
#self.toolButton_13.setToolTip(_translate("MainWindow", "Zoom All", None))
#self.toolButton_13.setText(_translate("MainWindow", "...", None))
#self.toolButton_12.setToolTip(_translate("MainWindow", "Zoom in", None))
#self.toolButton_12.setText(_translate("MainWindow", "...", None))
#self.toolButton_11.setToolTip(_translate("MainWindow", "Zoom out", None))
#self.toolButton_11.setText(_translate("MainWindow", "...", None))
#self.pushButton_2.setText(_translate("MainWindow", "PushButton", None))
class TabContainer(QtGui.QWidget):
def __init__(self,parent):
super(TabContainer, self).__init__(parent)
self.parent=parent
self.initUI()
QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Q"), self, self.close)
QtGui.QShortcut(QtGui.QKeySequence("Ctrl+T"), self, self.add_page)
QtGui.QShortcut(QtGui.QKeySequence("Ctrl+W"), self, self.closeTab_1)
def initUI(self):
#self.setGeometry( 150, 150, 650, 350)
self.tabWidget = QtGui.QTabWidget(self)
self.tabWidget.setTabPosition(QtGui.QTabWidget.North)
# self.tabwidget.setTabShape(QtGui.QTabWidget.Triangular)
#QtCore.QObject.connect(self, QtCore.SIGNAL('tabCloseRequested(int)'), self.closeTab)
self.connect(self.tabWidget, QtCore.SIGNAL('tabCloseRequested (int)'),self.closeTab)
self.tabWidget.setTabsClosable(True)
#self.tabwidget.removeTab(1)
self.tabWidget.setAutoFillBackground(False)
self.tabWidget.setMovable(True)
#self.tabwidget.setTabShape(QtGui.QTabWidget.Rounded)
vbox = QtGui.QVBoxLayout()
self.tabWidget.setDocumentMode(True)
vbox.addWidget(self.tabWidget)
self.tabButton = QtGui.QToolButton(self)
self.tabButton.setText(' + ')
font = self.tabButton.font()
font.setBold(True)
self.tabButton.setFont(font)
self.tabWidget.setCornerWidget(self.tabButton)
self.tabButton.clicked.connect(self.add_page)
self.setLayout(vbox)
self.pages = []
self.add_page()
self.show()
def closeTab(self, index):
#self.tabWidget.widget(index).close()
if self.tabWidget.count()== 1:
self.close()
#self.pages.remove(self.tabwidget.currentWidget())
self.tabWidget.removeTab(index)
self.tabWidget.destroy(index)
print len(self.pages)
def closeTab_1(self):
print"hello"
index=self.tabWidget.currentIndex()
if self.tabWidget.count()== 1:
self.close()
self.pages.remove(self.tabWidget.currentWidget())
self.tabWidget.destroy(index)
self.tabWidget.removeTab(index)
print len(self.pages)
def create_page(self, *contents):
print("creating new page")
page = QtGui.QWidget()
vbox = QtGui.QVBoxLayout()
for c in contents:
vbox.addWidget(c)
page.setLayout(vbox)
return page
def add_page(self):
#self.pages.append( self.create_page( MainWindow() ) )
print("adding page")
self.pages.append(Ui_MainWindow(self))
self.tabWidget.addTab( self.pages[-1] , 'Project %s' % len(self.pages) )
self.tabWidget.setCurrentIndex( len(self.pages)-1 )
def getProjectName(self):
return 'Project %s' % len(self.pages)
def showKeyboard(self):
self.parent.show_1()
class Ui_MainWindow_2(QtGui.QMainWindow):
delimit = ','
#t=TabContainer()
def __init__(self):
QtGui.QWidget.__init__(self)
self.setupUi(self)
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
MainWindow.setCentralWidget(self.centralwidget)
self.t=TabContainer(self)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
#self.menuView = QtGui.QMenu(self.menubar)
#self.menuView.setObjectName(_fromUtf8("menuView"))
self.menuView_2 = QtGui.QMenu(self.menubar)
self.menuView_2.setObjectName(_fromUtf8("menuView_2"))
#self.menuSettigs = QtGui.QMenu(self.menubar)
#self.menuSettigs.setObjectName(_fromUtf8("menuSettigs"))
#self.menuWindow = QtGui.QMenu(self.menubar)
#self.menuWindow.setObjectName(_fromUtf8("menuWindow"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionNew_Project = QtGui.QAction(MainWindow)
self.actionNew_Project.setObjectName(_fromUtf8("actionNew_Project"))
'''self.actionSave = QtGui.QAction(MainWindow)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionSave_As = QtGui.QAction(MainWindow)
self.actionSave_As.setObjectName(_fromUtf8("actionSave_As"))'''
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
#self.actionOpen_Project = QtGui.QAction(MainWindow)
#self.actionOpen_Project.setObjectName(_fromUtf8("actionOpen_Project"))
'''self.actionSave_All = QtGui.QAction(MainWindow)
self.actionSave_All.setObjectName(_fromUtf8("actionSave_All"))'''
#self.actionPrint = QtGui.QAction(MainWindow)
#self.actionPrint.setObjectName(_fromUtf8("actionPrint"))
self.actionClose = QtGui.QAction(MainWindow)
self.actionClose.setObjectName(_fromUtf8("actionClose"))
self.actionTable = QtGui.QAction(MainWindow,checkable=True)
self.actionTable.setChecked(True)
self.actionTable.setIconVisibleInMenu(False)
self.actionTable.setObjectName(_fromUtf8("actionTable"))
self.actionFullScrren = QtGui.QAction(MainWindow)
self.actionFullScrren.setObjectName(_fromUtf8("actionFullScrren"))
self.actionExit_Full_Screen_esc = QtGui.QAction(MainWindow)
self.actionExit_Full_Screen_esc.setObjectName(_fromUtf8("actionExit_Full_Screen_esc"))
self.actionFile_Menu = self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_4.toggleViewAction()
#self.actionFile_Menu.setChecked(True)
self.actionFile_Menu.setObjectName(_fromUtf8("actionFile_Menu"))
self.actionGraph_Menu =self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_3.toggleViewAction()
#self.actionGraph_Menu.setChecked(True)
self.actionGraph_Menu.setObjectName(_fromUtf8("actionGraph_Menu"))
self.actionEquation_Widget = self.t.pages[self.t.tabWidget.currentIndex()].dockWidget.toggleViewAction()
#self.actionEquation_Widget.setChecked(True)
self.actionEquation_Widget.setObjectName(_fromUtf8("actionEquation_Widget"))
self.menuFile.addAction(self.actionNew_Project)
#self.menuFile.addAction(self.actionOpen_Project)
#self.menuFile.addAction(self.actionSave)
#self.menuFile.addAction(self.actionSave_As)
#self.menuFile.addAction(self.actionSave_All)
#self.menuFile.addAction(self.actionPrint)
self.menuFile.addAction(self.actionClose)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuView_2.addAction(self.actionTable)
self.menuView_2.addAction(self.actionFullScrren)
self.menuView_2.addAction(self.actionExit_Full_Screen_esc)
self.menuView_2.addAction(self.actionFile_Menu)
self.menuView_2.addAction(self.actionGraph_Menu)
self.menuView_2.addAction(self.actionEquation_Widget)
self.menubar.addAction(self.menuFile.menuAction())
#self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuView_2.menuAction())
#self.menubar.addAction(self.menuSettigs.menuAction())
#self.menubar.addAction(self.menuWindow.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
## Connect the actions created above to their slots
self.actionFullScrren.triggered.connect(self.FullScrren)
self.actionExit_Full_Screen_esc.triggered.connect(self.exitFullScreen)
#self.actionSave.triggered.connect(self.save)
#self.actionSave_As.triggered.connect(self.save_as)
self.actionClose.triggered.connect(self.close_1)
self.actionExit.triggered.connect(self.close_2)
#self.actionEquation_Widget.triggered.connect(self.equationWidget)
#self.actionFile_Menu.triggered.connect(self.fileMenu)
self.actionTable.triggered.connect(self.showTable)
#self.actionTable.triggered.connect(self.graphMenu)
self.vbox=QtGui.QVBoxLayout(self.centralwidget)
self.myKeyboard = Ui_DockWidget(self,None)
self.myKeyboard_2 = Ui_DockWidget_2(self,None)
self.vbox.addWidget(self.t)
self.vbox.setMargin(0)
self.vbox.setSpacing(0)
#self.vbox.setContentsMargins(0,0,0,0)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def tabChangedSlot(self,index):
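# When the active project tab changes, swap the View-menu toggle actions so that
# they control the dock widgets belonging to the newly selected tab.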
self.menuView_2.removeAction(self.actionFile_Menu)
self.menuView_2.removeAction(self.actionGraph_Menu)
self.menuView_2.removeAction(self.actionEquation_Widget)
self.actionFile_Menu=self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_4.toggleViewAction()
self.actionEquation_Widget=self.t.pages[self.t.tabWidget.currentIndex()].dockWidget.toggleViewAction()
self.actionGraph_Menu=self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_3.toggleViewAction()
self.actionEquation_Widget.setText("Equation Widget")
self.actionGraph_Menu.setText("Graph Menu")
self.actionFile_Menu.setText("File Menu")
self.menuView_2.addAction(self.actionFile_Menu)
self.menuView_2.addAction(self.actionGraph_Menu)
self.menuView_2.addAction(self.actionEquation_Widget)
def close_1(self):
self.t.closeTab_1()
def close_2(self):
self.t.close()
self.close()
def show_1(self):
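# Toggle both on-screen keyboards, positioning them relative to the stored window
# geometry before showing them.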
if self.myKeyboard.isVisible()==False:
#self.myKeyboard.setSize(self.rect)
self.myKeyboard.move(1.73532*self.rect.width()-911-293,1.73532*self.rect.height()-296)
self.myKeyboard.show()
#self.myKeyboard.setTarget(self.dockWidgetContents.eqList[0].frame.widget_4)
else:
self.myKeyboard.hide()
if self.myKeyboard_2.isVisible()==False:
#print self.rect_1.width()
self.myKeyboard_2.move(1.73532*self.rect.width()-350,0)
#self.myKeyboard_2.setTarget(self.target)
self.myKeyboard_2.show()
else:
self.myKeyboard_2.hide()
def setSize(self):
self.rect=self.geometry()
def FullScrren(self):
self.t.pages[self.t.tabWidget.currentIndex()].hideAll()
def exitFullScreen(self):
self.t.pages[self.t.tabWidget.currentIndex()].showAll()
def equationWidget(self):
if self.t.pages[self.t.tabWidget.currentIndex()].dockWidget.isVisible() == False:
self.t.pages[self.t.tabWidget.currentIndex()].dockWidget.show()
else:
self.t.pages[self.t.tabWidget.currentIndex()].dockWidget.hide()
def fileMenu(self):
if self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_4.isVisible() == False:
self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_4.show()
else:
self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_4.hide()
def showTable(self):
if self.t.pages[self.t.tabWidget.currentIndex()].frame.isVisible() == False:
self.t.pages[self.t.tabWidget.currentIndex()].frame.show()
self.t.pages[self.t.tabWidget.currentIndex()].frame_2.hide()
self.t.pages[self.t.tabWidget.currentIndex()].pushButton.hide()
else:
self.t.pages[self.t.tabWidget.currentIndex()].frame.hide()
self.t.pages[self.t.tabWidget.currentIndex()].frame_2.show()
self.t.pages[self.t.tabWidget.currentIndex()].pushButton.show()
def graphMenu(self):
if self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_3.isVisible() == False:
self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_3.show()
else:
self.t.pages[self.t.tabWidget.currentIndex()].dockWidget_3.hide()
#self.dockWidget_4.show()
#self.dockWidget_3.show()
#self.frame.show()
def save(self):
print self.t.pages[self.t.tabWidget.currentIndex()].tabWidget.currentIndex()
def save_as(self):
print "save_as"
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.menuFile.setTitle(_translate("MainWindow", "File", None))
#self.menuView.setTitle(_translate("MainWindow", "Edit", None))
self.menuView_2.setTitle(_translate("MainWindow", "View", None))
#self.menuSettigs.setTitle(_translate("MainWindow", "Settings", None))
#self.menuWindow.setTitle(_translate("MainWindow", "Window", None))
self.menuHelp.setTitle(_translate("MainWindow", "Help", None))
self.actionNew_Project.setText(_translate("MainWindow", "New Project", None))
#self.actionSave.setText(_translate("MainWindow", "Save", None))
#self.actionSave_As.setText(_translate("MainWindow", "Save As", None))
self.actionExit.setText(_translate("MainWindow", "Exit", None))
#self.actionOpen_Project.setText(_translate("MainWindow", "Open Project", None))
#self.actionSave_All.setText(_translate("MainWindow", "Save All", None))
#self.actionPrint.setText(_translate("MainWindow", "Print", None))
self.actionClose.setText(_translate("MainWindow", "Close", None))
self.actionTable.setText(_translate("MainWindow", "Table", None))
self.actionFullScrren.setText(_translate("MainWindow", "FullScreen", None))
self.actionExit_Full_Screen_esc.setText(_translate("MainWindow", "Exit Full Screen (esc)", None))
self.actionFile_Menu.setText(_translate("MainWindow", "File Menu", None))
self.actionGraph_Menu.setText(_translate("MainWindow", "Graph Menu", None))
self.actionEquation_Widget.setText(_translate("MainWindow", "Equation Widget", None))
def getTabName(self):
return self.t.getProjectName()
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(456, 339)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setGeometry(QtCore.QRect(70, 280, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.label = QtGui.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(40, 110, 211, 21))
self.label.setObjectName(_fromUtf8("label"))
self.comboBox = QtGui.QComboBox(Dialog)
self.comboBox.setGeometry(QtCore.QRect(260, 110, 141, 32))
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.widget = QtGui.QWidget(Dialog)
self.widget.setEnabled(False)
self.widget.setGeometry(QtCore.QRect(50, 200, 311, 71))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(163, 163, 163))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(163, 163, 163))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(163, 163, 163))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.widget.setPalette(palette)
self.widget.setObjectName(_fromUtf8("widget"))
self.label_2 = QtGui.QLabel(self.widget)
self.label_2.setEnabled(False)
self.label_2.setGeometry(QtCore.QRect(10, 10, 281, 21))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(self.widget)
self.label_3.setGeometry(QtCore.QRect(10, 30, 291, 21))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.widget_2 = QtGui.QWidget(Dialog)
self.widget_2.setGeometry(QtCore.QRect(60, 20, 361, 71))
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.label_6 = QtGui.QLabel(self.widget_2)
self.label_6.setEnabled(False)
self.label_6.setGeometry(QtCore.QRect(10, 10, 281, 22))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.label_7 = QtGui.QLabel(self.widget_2)
self.label_7.setEnabled(False)
self.label_7.setGeometry(QtCore.QRect(10, 30, 331, 22))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setEnabled(False)
self.label_4.setGeometry(QtCore.QRect(240, 160, 83, 22))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.textEdit = QtGui.QTextEdit(Dialog)
self.textEdit.setEnabled(False)
self.textEdit.setGeometry(QtCore.QRect(320, 160, 41, 31))
self.textEdit.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
#self.buttonBox.button(QtGui.QDialogButtonBox.Ok).clicked.connect(lambda:self.storeDelim)
self.comboBox.activated.connect(self.storeDelim)
self.textEdit.textChanged.connect(self.storeDelim)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Choose Delimiter", None))
self.label.setText(_translate("Dialog", "Please specify delimiter :", None))
self.comboBox.setItemText(0, _translate("Dialog", "\',\' (comma)", None))
self.comboBox.setItemText(1, _translate("Dialog", "\' \' (space)", None))
self.comboBox.setItemText(2, _translate("Dialog", "\';\' (semicolon)", None))
self.comboBox.setItemText(3, _translate("Dialog", "\'-\' (dash)", None))
self.comboBox.setItemText(4, _translate("Dialog", "\':\' (colon)", None))
self.comboBox.setItemText(5, _translate("Dialog", "Specify other", None))
self.label_2.setText(_translate("Dialog", "The Plotter retrieves values ", None))
self.label_3.setText(_translate("Dialog", "separated by the chosen delimiter", None))
self.label_6.setText(_translate("Dialog", "The Plotter has detected that the", None))
self.label_7.setText(_translate("Dialog", "selected file is NOT in proper csv format", None))
self.label_4.setText(_translate("Dialog", "Specify :", None))
def storeDelim(self):
if self.comboBox.currentIndex()==0:
self.ch = ','
self.label_4.setEnabled(False)
self.textEdit.setEnabled(False)
elif self.comboBox.currentIndex()==1:
self.ch = ' '
self.label_4.setEnabled(False)
self.textEdit.setEnabled(False)
elif self.comboBox.currentIndex()==2:
self.ch = ';'
self.label_4.setEnabled(False)
self.textEdit.setEnabled(False)
elif self.comboBox.currentIndex()==3:
self.ch = '-'
self.label_4.setEnabled(False)
self.textEdit.setEnabled(False)
elif self.comboBox.currentIndex()==4:
self.ch = ':'
self.label_4.setEnabled(False)
self.textEdit.setEnabled(False)
elif self.comboBox.currentIndex()==5:
self.label_4.setEnabled(True)
self.textEdit.setEnabled(True)
self.ch=str(self.textEdit.toPlainText())
def getDelim(self):
return self.ch
class StartDialog(QtGui.QDialog, Ui_Dialog):
def __init__(self,parent=None):
QtGui.QDialog.__init__(self,parent)
self.setupUi(self)
class Ui_Dialog_2(object): ## class for error Dialog Box
mssg = 'error message'
def __init__(self,string):
self.mssg = string
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(400, 126)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setGeometry(QtCore.QRect(140, 70, 91, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.label = QtGui.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(40, 30, 400, 22))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label.setText(_translate("Dialog", self.mssg, None))
def show_splash(path):
image = QtGui.QPixmap(path)
splash = SplashScreen(image)
font = QtGui.QFont(splash.font())
font.setPointSize(font.pointSize() + 5)
splash.setFont(font)
splash.show()
QtGui.QApplication.processEvents()
for count in range(1, 100):
splash.showMessage(splash.tr('Processing %1...').arg(count),
QtCore.Qt.AlignBottom+20, QtCore.Qt.black)
QtGui.QApplication.processEvents()
QtCore.QThread.msleep(15)
splash.hide()
splash.close()
def changedFocusSlot(old, now):
if type(now) is QLineEdit:
keyboard.setTarget(now)
keyboard_2.setTarget(now)
import sys
if __name__ == '__main__':
app = QtGui.QApplication.instance()
#show_splash('splashscreen.jpg')
ex = Ui_MainWindow_2()
keyboard=ex.myKeyboard
keyboard_2=ex.myKeyboard_2
QtCore.QObject.connect(app, SIGNAL("focusChanged(QWidget *, QWidget *)"), changedFocusSlot)
QtCore.QObject.connect(ex.t.tabWidget, QtCore.SIGNAL(_fromUtf8("currentChanged(int)")), ex.tabChangedSlot)
#ex.myKeyboard.
ex.showMaximized()
ex.setSize()
#app.focusChanged.connect(keyboardFocusChanger)
sys.exit(app.exec_())
| gpl-2.0 |
ishank08/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
bh107/benchpress | benchpress/benchmarks/xraysim/python_numpy/material.py | 1 | 10037 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 22 09:10:36 2014
@author: Mads Thoudahl
"""
import os
from numpy import loadtxt, logspace
try:
from cPickle import load, dump
except:
from pickle import load, dump
class Material():
""" Material class, representing all materials """
# for enumeration purposes
vacuum = 0
hydrogen = 1
carbon = 6
aluminium = 13
titanium = 22
iron = 26
gold = 79
air = 801
blood = 802
bone = 803
brain = 804
muscle = 805
tissue = 806
cam = 821
oak = 822
pe = 851
# a .csv file exists for every one of these materials.
uids = {} # unique ids, held with no protection, so a peaceful environment is anticipated
full_path = os.path.realpath(__file__)
exec_path, exec_filename = os.path.split(full_path)
elmpath = exec_path + '/materials/elemental'
biopath = exec_path + '/materials/biotic'
def __init__(self,
filename, # csv file containing the measurepoints of this material
density=0 # material density in g/cm3
):
""" instantiate a material object from a csv file,
structured line-wise:
1: 'Name', *material-name*
2: 'Z', *elemental atomic number*
3: 'rho (g/cm3)', *standard material density @ 1 atm, 20 deg C*
4: 'Energy (MeV)', 'mu/rho (cm2/g)'
5: *measurepoints sorted by energy!*
"""
try:
self.short = filename[:-4] # name, shorthand
# get name, atomic number and standard density
f = open(filename, "r")
self.name = f.readline().split(',')[1].strip().strip('"')
z = int( f.readline().split(',')[1] )
self.uid = Material.uniqueid(self, z)
if self.uid == -1:
raise ValueError('{} does not have a new Unique ID (Z) (or has been initialized before).'.format(self.name))
self.rho = float( density if density else f.readline().split(',')[1] )
f.close()
# get datapoints from file for spline interpolation
x = loadtxt(fname=filename, delimiter=',', skiprows=4, usecols=(0,1))
self.Es = x[:,0]
self.mus = x[:,1] * self.rho
self.mu = {}
except ValueError as e:
print(e)
return None
@staticmethod
def uniqueid(self, uid):
""" Checks if uid is vacant, if so occupies it.
returns unique id, or -1 on error """
if uid in Material.uids:
return -1
else:
Material.uids[uid] = True
return uid
def __str__(self):
""" returns a string describing this material """
return self.name
def getMu(self, energy):
""" returns the absorption coefficient for this material at a
certain energy level """
# load/store Optimization
try:
# get stored mu, as chances are the same energy has been requested before
return self.mu[energy]
except (KeyError, TypeError) as e:
e = e
pass
# use cubic interpolation function to determine mu
# fun = interp1d( self.Es, self.mus, kind='cubic')
# NO CAN DO... DUE TO OSCILLATIONS IT DOES NOT YIELD THE CORRECT RESULT
#fun = interp1d( self.Es, self.mus, kind='linear')
#mu = fun(energy)
# Linear spline interpolation, self-implemented
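# Walk backwards through the (ascending) energy grid to find the interval
# [Es[i], Es[i+1]] that brackets the requested energy, then interpolate linearly.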
for i in range(len(self.Es)-1)[::-1]:
if energy - self.Es[i] >= 0: break
mu = self.mus[i] + (energy - self.Es[i]) * \
( (self.mus[i+1] - self.mus[i]) / (self.Es[i+1] - self.Es[i]) )
# store calculated mu value and return it
try:
self.mu[energy] = mu
except (KeyError, TypeError) as e:
e = e
pass
return mu
@staticmethod
def initGroup(path):
""" initiating all elemental materials in csv files in path """
try:
with open('data', 'rb') as fp:
data = load(fp)
return data
except IOError:
data = {}
for file_ in os.listdir( path ):
if file_.endswith(".csv"):
mat = Material( path + '/' + file_ )
if mat is None:
print("{} failed to initialize".format(file_))
data[mat.uid] = mat
return data
@staticmethod
def initAll():
""" initiating all elemental materials in csv files in path """
materials = { 0 : Simplemat(name = 'Vacuum', mu = 0, uid = 0) }
materials.update( Material.initElementals() )
materials.update( Material.initBiotics() )
return materials
@staticmethod
def initElementals():
""" initiating all elemental materials in csv files in path """
return Material.initGroup(Material.elmpath)
@staticmethod
def initBiotics():
""" initiating all elemental materials in csv files in path """
return Material.initGroup(Material.biopath)
class Simplemat(Material):
""" Manual subclass of Material, are capable of returning a constant
attenuation coefficient, regardless of the energy of the penetrating
ray, which is the simplest modelling of a material in this regard """
def __init__(self, name, mu, uid):
""" instantiate a material subclass instance """
try:
self.name = name
self.mu = mu
self.uid = Material.uniqueid(self, uid)
if self.uid == -1:
raise Exception('{} does not have a new Unique ID (Z) (or has been initialized before).'.format(self.name))
except Exception as e:
print(e)
return
def __str__(self):
""" returns a string describing this material """
return self.name
def getMu(self, energy):
""" returns the calculated atenuation coefficient """
return self.mu
if __name__ == "__main__":
bigplotmaterial = Material.carbon
elementals = Material.initElementals()
biotics = Material.initBiotics()
materials = dict(elementals)
materials.update(biotics)
materials[0] = Simplemat('Vacuum',0,0)
elementals = [key for key in elementals.iterkeys()]
biotics = [key for key in biotics.iterkeys()]
print(elementals)
print(biotics)
for key, value in materials.iteritems() :
print(key, value)
from matplotlib import pyplot as plt
i = 0
for key in elementals+biotics:
if i % 6 == 0:
plt.figure()
plt.suptitle('Linear attenuation coefficients of \n'+\
'various materials at standard densities',\
fontsize='xx-large')
i = 0
i += 1
title = materials[key].name
plt.subplot(3,2,i)
plt.title(title, fontsize='x-large')
plt.xlabel('E [MeV]', fontsize='large')
plt.ylabel(' mu [1/cm]', fontsize='large')
plt.loglog( materials[key].Es, materials[key].mus,\
c='r', marker='o', ls='', label='NIST datapoints')
xs = logspace(-3, 1.2, num=300, base=10)
ys = [materials[key].getMu(x) for x in xs]
plt.loglog( xs, ys, c='b', ls='-', label='Interpolating linear splines')
plt.legend(loc = 'lower left')
plt.xticks([10**(-2),10])
plt.yticks([10**(-2),10])
plt.figure()
plt.rc('text', usetex=True)
#plt.title('Linear attenuation coefficients of Hydrogen',\
# fontsize='xx-large')
title = materials[bigplotmaterial].name
plt.title(title, fontsize='x-large')
plt.xlabel('E [MeV]', fontsize='large')
plt.ylabel(r'\mu [1/cm]', fontsize='large')
plt.loglog( materials[bigplotmaterial].Es, materials[bigplotmaterial].mus,\
c='r', marker='o', ls='', label='NIST datapoints')
xs = logspace(-3, 1.2, num=300, base=10)
ys = [materials[bigplotmaterial].getMu(x) for x in xs]
plt.loglog( xs, ys, c='b', ls='-', label='Interpolating linear splines')
plt.legend(loc = 'upper right')
#plt.xlim( (0.001,1) )
#plt.ylim( (0.00001,0.001))
plt.show()
# creating plots to visualize comparison of attenuation coefficients
#elementals = Material.initElementals()
#biotics = Material.initBiotics()
#materials = dict(elementals)
#materials.update(biotics)
#materials['Vacuum'] = Simplemat('Vacuum',0)
#
#xs = linspace(0.00, 120, 400)
#ys1 = [materials['Blood'].getMu(x/1000) for x in xs]
#ys2 = [materials['Cam'].getMu(x/1000) for x in xs]
#ys3 = [materials['Oak'].getMu(x/1000) for x in xs]
#ys4 = [materials['Bone'].getMu(x/1000) for x in xs]
#ys5 = [materials['Air'].getMu(x/1000) for x in xs]
#ys6 = [materials['Ti'].getMu(x/1000) for x in xs]
#ys7 = [materials['Au'].getMu(x/1000) for x in xs]
#ys8 = [materials['Fe'].getMu(x/1000) for x in xs]
#ys9 = [materials['C'].getMu(x/1000) for x in xs]
#
#plt.figure()
#plt.title('Comparison of attenuation coefficient as function of energy', fontsize='x-large')
#plt.xlabel('E [keV]', fontsize='large')
#plt.ylabel(r'\mu [1/cm]', fontsize='large')
#plt.ylim( (-0.001,2) )
#
#plt.plot(xs,ys1, '--r', label='Blood')
#plt.plot(xs,ys2, '--m', label='Cam')
#plt.plot(xs,ys3, '--g', label='Oak')
#plt.plot(xs,ys4, '--b', label='Bone')
#plt.plot(xs,ys5, '--k', label='Air')
#plt.legend(loc = 'upper right')
#
#
#plt.figure()
#plt.title('Comparison of attenuation coefficient as function of energy', fontsize='x-large')
#plt.xlabel('E [keV]', fontsize='large')
#plt.ylabel(r'\mu [1/cm]', fontsize='large')
#plt.ylim( (-0.001,50e5) )
#plt.xlim( (-0.001,40) )
#
#plt.plot(xs,ys4, '--c', label='Bone')
#plt.plot(xs,ys9, '--k', label='Carbon')
#plt.plot(xs,ys6, '--r', label='Titanium')
#plt.plot(xs,ys8, '--b', label='Iron')
#plt.plot(xs,ys7, '--y', label='Gold')
#plt.legend(loc = 'right')
| apache-2.0 |
hdmetor/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
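# Standard deviation of each feature's importance across the individual trees,
# used below as the error bars on the plot.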
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
pllim/astropy | astropy/visualization/wcsaxes/tests/test_frame.py | 11 | 5290 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from astropy.visualization.wcsaxes import WCSAxes
from astropy.visualization.wcsaxes.frame import BaseFrame
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
from .test_images import BaseImageTests
class HexagonalFrame(BaseFrame):
spine_names = 'abcdef'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
ymid = 0.5 * (ymin + ymax)
xmid1 = (xmin + xmax) / 4.
xmid2 = (xmin + xmax) * 3. / 4.
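# Spines 'a'..'f' trace the six edges of a hexagon inscribed in the current axis
# limits, starting at the bottom edge and proceeding counter-clockwise.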
self['a'].data = np.array(([xmid1, ymin], [xmid2, ymin]))
self['b'].data = np.array(([xmid2, ymin], [xmax, ymid]))
self['c'].data = np.array(([xmax, ymid], [xmid2, ymax]))
self['d'].data = np.array(([xmid2, ymax], [xmid1, ymax]))
self['e'].data = np.array(([xmid1, ymax], [xmin, ymid]))
self['f'].data = np.array(([xmin, ymid], [xmid1, ymin]))
class TestFrame(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_custom_frame(self):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7],
wcs=wcs,
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.coords.grid(color='white')
im = ax.imshow(np.ones((149, 149)), vmin=0., vmax=2.,
origin='lower', cmap=plt.cm.gist_heat)
minpad = {}
minpad['a'] = minpad['d'] = 1
minpad['b'] = minpad['c'] = minpad['e'] = minpad['f'] = 2.75
ax.coords['glon'].set_axislabel("Longitude", minpad=minpad)
ax.coords['glon'].set_axislabel_position('ad')
ax.coords['glat'].set_axislabel("Latitude", minpad=minpad)
ax.coords['glat'].set_axislabel_position('bcef')
ax.coords['glon'].set_ticklabel_position('ad')
ax.coords['glat'].set_ticklabel_position('bcef')
# Set limits so that no labels overlap
ax.set_xlim(5.5, 100.5)
ax.set_ylim(5.5, 110.5)
# Clip the image to the frame
im.set_clip_path(ax.coords.frame.patch)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_update_clip_path_rectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_update_clip_path_nonrectangular(self, tmpdir):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal',
frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_update_clip_path_change_wcs(self, tmpdir):
# When WCS is changed, a new frame is created, so we need to make sure
# that the path is carried over to the new frame.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
ax.set_xlim(0., 2.)
ax.set_ylim(0., 2.)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmpdir.join('nothing').strpath)
ax.reset_wcs()
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
def test_copy_frame_properties_change_wcs(self):
# When WCS is changed, a new frame is created, so we need to make sure
# that the color and linewidth are transferred over
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.coords.frame.set_linewidth(5)
ax.coords.frame.set_color('purple')
ax.reset_wcs()
assert ax.coords.frame.get_linewidth() == 5
assert ax.coords.frame.get_color() == 'purple'
| bsd-3-clause |
pmyates/plutokore | scripts/generate_pluto_input_data.py | 1 | 7662 | #!/usr/bin/env python3
# system
import os
import sys
# utilities
from numba import jit
from pathlib import Path
import h5py
from IPython.display import display,clear_output
from datetime import date
# science imports
import numpy as np
import scipy.interpolate
import scipy.integrate
from scipy.integrate import trapz
# matplotlib imports
import matplotlib as mpl
import matplotlib.pyplot as plot
# astropy imports
from astropy.table import Table
from astropy import units as u # Astropy units
from astropy import cosmology as cosmo # Astropy cosmology
from astropy import constants as const # Astropy constants
from astropy.convolution import convolve, Gaussian2DKernel # Astropy convolutions
# plutokore
if os.path.exists(os.path.expanduser('~/plutokore')):
sys.path.append(os.path.expanduser('~/plutokore'))
else:
sys.path.append(os.path.expanduser('~/uni/plutokore'))
import plutokore as pk
import plutokore.radio as radio
from plutokore.jet import UnitValues
unit_length = 1 * u.kpc
unit_density = (0.60364 * u.u / (u.cm ** 3)).to(u.g / u.cm ** 3)
unit_speed = const.c
unit_time = (unit_length / unit_speed).to(u.Myr)
unit_pressure = (unit_density * (unit_speed ** 2)).to(u.Pa)
unit_mass = (unit_density * (unit_length ** 3)).to(u.kg)
unit_energy = (unit_mass * (unit_length**2) / (unit_time**2)).to(u.J)
uv = UnitValues(
density=unit_density,
length=unit_length,
time=unit_time,
mass=unit_mass,
pressure=unit_pressure,
energy=unit_energy,
speed=unit_speed,
)
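# Sanity check (approximate, assuming the inputs above):
# unit_time = 1 kpc / c ~ 3.3e3 yr ~ 3.3e-3 Myr.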
def write_pluto_grid_information(*, grid_fname, dimensions, geometry, grid_list, extra_info = None):
"""
Writes out a PLUTO grid file in the same format as grid.out, for the given grid information
Parameters
----------
grid_fname : str
The grid filename
dimensions : int
The number of grid dimensions [1, 2, or 3]
geometry : str
The grid geometry [CARTESIAN, SPHERICAL, CYLINDRICAL, or POLAR]
grid_list : List[1D numpy.ndarray]
A list of n numpy arrays for an n-dimensional grid, where each numpy array contains the cell edges in that coordinate
extra_info : List[str]
A list of extra information that will be printed to the grid.out file
"""
# check arguments
if dimensions not in [1, 2, 3]:
raise Exception('Invalid dimensions')
if geometry not in ['CARTESIAN', 'SPHERICAL', 'CYLINDRICAL', 'POLAR']:
raise Exception('Invalid geometry')
# Write out our grid file
with open(grid_fname, 'w') as f:
# Write header info
f.write('# ' + '*'*50 + '\n')
f.write('# PLUTO 4.3 Grid File\n')
f.write(f'# Manually generated on {date.today()}\n')
f.write('#\n')
f.write('# Info:\n')
f.write('# Input data generated by python\n')
f.write(f'# Endianess: {sys.byteorder}\n')
if extra_info is not None:
f.writelines([f'# {line}\n' for line in extra_info])
f.write('#\n')
f.write(f'# DIMENSIONS: {dimensions}\n')
f.write(f'# GEOMETRY: {geometry}\n')
for dim in range(dimensions):
f.write(f'# X{dim+1}: [ {grid_list[dim][0]}, {grid_list[dim][-1]}], {grid_list[dim].shape[0] - 1} point(s), 0 ghosts\n')
f.write('# ' + '*'*50 + '\n')
# Write out our grid points
for dim in range(3):
if (dim < dimensions):
f.write(f'{grid_list[dim].shape[0] - 1}\n')
for pn in np.arange(grid_list[dim].shape[0]-1) + 1:
f.write(f'\t{pn}\t{grid_list[dim][pn-1]:.12e}\t{grid_list[dim][pn]:.12e}\n')
else:
f.write(f'{1}\n')
f.write(f'\t{1}\t{0:.12e}\t{1:.12e}\n')
pass
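# For illustration, a 1D grid with cell edges [0.0, 0.5, 1.0] would produce, after
# the commented header, a tab-separated block of the form:
#   2
#     1  0.000000000000e+00  5.000000000000e-01
#     2  5.000000000000e-01  1.000000000000e+00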
def write_pluto_data(*, data_fname, data_list):
"""
Writes out a PLUTO data file in the same format as *.dbl, for the given variables
Parameters
----------
data_fname : str
The data filename (must end in .dbl)
data_list : List[n-dimensional numpy.ndarray]
A list of numpy arrays (one per variable) to be written out to the data file.
Each numpy array should have the same dimensions as the grid, and contain the cell-centered values
of that variable.
"""
with open(data_fname, 'w') as f:
for vdata in data_list:
vdata.astype(np.double).flatten(order = 'F').tofile(f)
def write_pluto_initial_conditions(*, grid_fname, data_fname, grid_list, data_dict, dimensions, geometry, extra_info = None):
"""
Writes out PLUTO grid and data files to be used as intial conditions
Parameters
----------
grid_fname : str
The grid filename
data_fname : str
The data filename (must end in .dbl)
grid_list : List[1D numpy.ndarray]
A list of n numpy arrays for an n-dimensional grid, where each numpy array contains the cell edges in that coordinate
data_dict : Dictionary{str, n-dimensional numpy.ndarray}
A dictionary of numpy variable arrays.
Each variable should have it's own entry, e.g. {'rho' : rho_numpy_array }, where the variable array
should have the same dimensions as the grid, and contain the cell-centered values of that variable
dimensions : int
The number of grid dimensions [1, 2, or 3]
geometry : str
The grid geometry [CARTESIAN, SPHERICAL, CYLINDRICAL, or POLAR]
extra_info : List[str]
A list of extra information that will be printed to the grid.out file
"""
if extra_info is None:
extra_info = []
write_pluto_grid_information(grid_fname = grid_fname,
dimensions = dimensions,
geometry = geometry,
grid_list = grid_list,
extra_info = extra_info + ['Variables in dbl file:'] + list(data_dict.keys()))
write_pluto_data(data_fname = data_fname,
data_list = list(data_dict.values()))
def main():
# first we generate our data
# for testing, I've created a grid ranging from -55 to 55 in each direction, with random edge sampling
# note that the ex, ey, ez coordinate arrays are EDGES
nx, ny, nz = (101, 51, 201)
ex = np.sort(np.random.uniform(low = -55, high = 55, size = nx))
ey = np.sort(np.random.uniform(low = -55, high = 55, size = ny))
ez = np.sort(np.random.uniform(low = -55, high = 55, size = nz))
# now we create our midpoint arrays
mx = (np.diff(ex)*0.5) + ex[:-1]
my = (np.diff(ey)*0.5) + ey[:-1]
mz = (np.diff(ez)*0.5) + ez[:-1]
print(mx.shape)
# let's generate some sort of density data
# first we create our meshgrid
# note the 'ij' indexing - THIS IS IMPORTANT. Need to use this if working in 3D
mesh_x,mesh_y,mesh_z = np.meshgrid(mx, my, mz, indexing = 'ij')
# create our radius array
r = np.sqrt(mesh_x**2 + mesh_y**2 + mesh_z**2)
# create our density array
rho = 1 * np.power(1 + (r/144), -3/2 * 0.5)
# for fun, let's also create a vx3 velocity array that is proportional to the current radius
vx3 = r * 1e-5
# now we save our data
write_pluto_initial_conditions(grid_fname = 'simple-3D.grid.out',
data_fname = 'simple-3D.dbl',
grid_list = [ex, ey, ez],
data_dict = {'rho': rho, 'vx3': vx3},
dimensions = 3,
geometry = 'CARTESIAN')
f,a = plot.subplots()
a.pcolormesh(mx, mz, rho[:,0,:].T)
plot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
leesavide/pythonista-docs | Documentation/matplotlib/examples/misc/svg_filter_line.py | 9 | 2145 | """
Demonstrate SVG filtering effects which might be used with mpl.
Note that the filtering effects are only effective if your SVG renderer
supports them.
"""
from __future__ import print_function
import matplotlib
matplotlib.use("Svg")
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
fig1 = plt.figure()
ax = fig1.add_axes([0.1, 0.1, 0.8, 0.8])
# draw lines
l1, = ax.plot([0.1, 0.5, 0.9], [0.1, 0.9, 0.5], "bo-",
mec="b", lw=5, ms=10, label="Line 1")
l2, = ax.plot([0.1, 0.5, 0.9], [0.5, 0.2, 0.7], "rs-",
mec="r", lw=5, ms=10, color="r", label="Line 2")
for l in [l1, l2]:
# draw shadows with same lines with slight offset and gray colors.
xx = l.get_xdata()
yy = l.get_ydata()
shadow, = ax.plot(xx, yy)
shadow.update_from(l)
# adjust color
shadow.set_color("0.2")
# adjust zorder of the shadow lines so that it is drawn below the
# original lines
shadow.set_zorder(l.get_zorder()-0.5)
# offset transform
ot = mtransforms.offset_copy(l.get_transform(), fig1,
x=4.0, y=-6.0, units='points')
shadow.set_transform(ot)
# set the id for a later use
shadow.set_gid(l.get_label()+"_shadow")
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.)
# save the figure as a string in the svg format.
from StringIO import StringIO
f = StringIO()
plt.savefig(f, format="svg")
import xml.etree.cElementTree as ET
# filter definition for a gaussian blur
filter_def = """
<defs xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>
<filter id='dropshadow' height='1.2' width='1.2'>
<feGaussianBlur result='blur' stdDeviation='3'/>
</filter>
</defs>
"""
# read in the saved svg
tree, xmlid = ET.XMLID(f.getvalue())
# insert the filter definition in the svg dom tree.
tree.insert(0, ET.XML(filter_def))
for l in [l1, l2]:
# pick up the svg element with given id
shadow = xmlid[l.get_label()+"_shadow"]
# apply shadow filter
shadow.set("filter",'url(#dropshadow)')
fn = "svg_filter_line.svg"
print("Saving '%s'" % fn)
ET.ElementTree(tree).write(fn)
| apache-2.0 |
M4rtinK/pyside-bb10 | doc/inheritance_diagram.py | 10 | 12497 | # -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:copyright: Copyright 2010-2011 by the PySide team.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.ext.graphviz import render_dot_html, render_dot_latex
from sphinx.util.compat import Directive
class_sig_re = re.compile(r'''^([\w.]*\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
class InheritanceException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False, parts=0):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
classes = self._import_classes(class_names, currmodule)
self.class_info = self._class_info(classes, show_builtins, parts)
if not self.class_info:
raise InheritanceException('No classes found for '
'inheritance diagram')
def _import_class_or_module(self, name, currmodule):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = class_sig_re.match(name).groups()
except (AttributeError, ValueError):
raise InheritanceException('Invalid class or module %r specified '
'for inheritance diagram' % name)
fullname = (path or '') + base
path = (path and path.rstrip('.') or '')
# two possibilities: either it is a module, then import it
try:
__import__(fullname)
todoc = sys.modules[fullname]
except ImportError:
# else it is a class, then import the module
if not path:
if currmodule:
# try the current module
path = currmodule
else:
raise InheritanceException(
'Could not import class %r specified for '
'inheritance diagram' % base)
try:
__import__(path)
todoc = getattr(sys.modules[path], base)
except (ImportError, AttributeError):
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % (path + '.' + base))
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name)
def _import_classes(self, class_names, currmodule):
"""Import a list of classes."""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name, currmodule))
return classes
def _class_info(self, classes, show_builtins, parts):
"""Return name and bases for all classes that are ancestors of
*classes*.
*parts* gives the number of dotted name parts that is removed from the
displayed node names.
"""
all_classes = {}
builtins = __builtins__.values()
def recurse(cls):
if not show_builtins and cls in builtins:
return
nodename = self.class_name(cls, parts)
fullname = self.class_name(cls, 0)
baselist = []
all_classes[cls] = (nodename, fullname, baselist)
for base in cls.__bases__:
if not show_builtins and base in builtins:
continue
if base.__name__ == "Object" and base.__module__ == "Shiboken":
continue
baselist.append(self.class_name(base, parts))
if base not in all_classes:
recurse(base)
for cls in classes:
recurse(cls)
return all_classes.values()
def class_name(self, cls, parts=0):
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [fullname for (_, fullname, _) in self.class_info]
# These are the default attrs for graphviz
default_graph_attrs = {
'rankdir': 'LR',
'size': '"8.0, 12.0"',
}
default_node_attrs = {
'shape': 'box',
'fontsize': 10,
'height': 0.25,
'fontname': 'Vera Sans, DejaVu Sans, Liberation Sans, '
'Arial, Helvetica, sans',
'style': '"setlinewidth(0.5)"',
}
default_edge_attrs = {
'arrowsize': 0.5,
'style': '"setlinewidth(0.5)"',
}
def _format_node_attrs(self, attrs):
return ','.join(['%s=%s' % x for x in attrs.items()])
def _format_graph_attrs(self, attrs):
return ''.join(['%s=%s;\n' % x for x in attrs.items()])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
g_attrs.update(graph_attrs)
n_attrs.update(node_attrs)
e_attrs.update(edge_attrs)
if env:
g_attrs.update(env.config.inheritance_graph_attrs)
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
res = []
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
for name, fullname, bases in self.class_info:
# Write the node
this_node_attrs = n_attrs.copy()
url = urls.get(fullname)
if url is not None:
this_node_attrs['URL'] = '"%s"' % url
res.append(' "%s" [%s];\n' %
(name, self._format_node_attrs(this_node_attrs)))
# Write the edges
for base_name in bases:
res.append(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_attrs(e_attrs)))
res.append('}\n')
return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
pass
class InheritanceDiagram(Directive):
"""
Run when the inheritance_diagram directive is first encountered.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'parts': directives.nonnegative_int,
}
def run(self):
node = inheritance_diagram()
node.document = self.state.document
env = self.state.document.settings.env
class_names = self.arguments[0].split()
class_role = env.get_domain('py').role('class')
# Store the original content for use as a hash
node['parts'] = self.options.get('parts', 0)
node['content'] = ', '.join(class_names)
# Create a graph starting with the list of classes
try:
graph = InheritanceGraph(
class_names, env.temp_data.get('py:module'),
parts=node['parts'])
except InheritanceException, err:
return [node.document.reporter.warning(err.args[0],
line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = class_role(
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
dotcode = graph.generate_dot(name, urls, env=self.builder.env)
render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance',
alt='Inheritance diagram of ' + node['content'])
raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_latex(self, node, dotcode, [], 'inheritance')
raise nodes.SkipNode
def skip(self, node):
raise nodes.SkipNode
def setup(app):
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
latex=(latex_visit_inheritance_diagram, None),
html=(html_visit_inheritance_diagram, None),
text=(skip, None),
man=(skip, None))
app.add_directive('inheritance-diagram', InheritanceDiagram)
app.add_config_value('inheritance_graph_attrs', {}, False),
app.add_config_value('inheritance_node_attrs', {}, False),
app.add_config_value('inheritance_edge_attrs', {}, False),
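# Usage sketch (illustration only, not part of the upstream module): a Sphinx
# project shipping this file would typically register it in conf.py, e.g.
#
#     extensions = ['inheritance_diagram']
#
# and then request a diagram from an .rst source with the directive defined
# above, for example
#
#     .. inheritance-diagram:: PySide.QtGui.QAbstractButton
#        :parts: 1
#
# The extension path and the class name shown here are placeholders.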
| lgpl-2.1 |
dsquareindia/scikit-learn | sklearn/linear_model/bayes.py | 4 | 19796 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Preserve the alpha and lambda values that were used to
# calculate the final coefficients
self.alpha_ = alpha_
self.lambda_ = lambda_
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
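# Illustrative snippet (a sketch, not part of scikit-learn): it exercises the
# return_std=True branch of predict() documented above on toy data. The helper
# name and the synthetic data are assumptions made for this example; the
# function is defined but never called at import time.
def _bayesian_ridge_return_std_demo():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 3)
    w_true = np.array([1.0, 0.5, -0.25])
    y_demo = np.dot(X_demo, w_true) + 0.1 * rng.randn(50)
    model = BayesianRidge(compute_score=True).fit(X_demo, y_demo)
    # y_std quantifies the posterior predictive uncertainty at each query point
    y_mean, y_std = model.predict(X_demo[:5], return_std=True)
    return y_mean, y_std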
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_ard.py for an example.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
ARD is a little different than the slide: only dimensions/features for
which self.lambda_ < self.threshold_lambda are kept and the rest are
discarded.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
| bsd-3-clause |
balavenkatesan/yellowbrick | yellowbrick/text/tsne.py | 2 | 12460 | # yellowbrick.text.tsne
# Implements TSNE visualizations of documents in 2D space.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Mon Feb 20 06:33:29 2017 -0500
#
# Copyright (C) 2016 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: tsne.py [] [email protected] $
"""
Implements TSNE visualizations of documents in 2D space.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from yellowbrick.text.base import TextVisualizer
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.style.colors import resolve_colors, get_color_cycle
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD, PCA
##########################################################################
## Quick Methods
##########################################################################
def tsne(X, y=None, ax=None, decompose='svd', decompose_by=50, classes=None,
colors=None, colormap=None, **kwargs):
"""
Display a projection of a vectorized corpus in two dimensions using TSNE,
a nonlinear dimensionality reduction method that is particularly well
suited to embedding in two or three dimensions for visualization as a
scatter plot. TSNE is widely used in text analysis to show clusters or
groups of documents or utterances and their relative proximities.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features representing the corpus of
vectorized documents to visualize with tsne.
y : ndarray or Series of length n
An optional array or series of target or class values for instances.
If this is specified, then the points will be colored according to
their class. Often cluster labels are passed in to color the documents
in cluster space, so this method is used both for classification and
clustering methods.
ax : matplotlib axes
The axes to plot the figure on.
decompose : string or None
A preliminary decomposition is often used prior to TSNE to make the
projection faster. Specify `"svd"` for sparse data or `"pca"` for
dense data. If decompose is None, the original data set will be used.
decompose_by : int
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
classes : list of strings
The names of the classes in the target, used to create a legend.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Sequential colormap for continuous target
kwargs : dict
Pass any additional keyword arguments to the TSNE transformer.
Returns
-------
ax : matplotlib axes
Returns the axes that the parallel coordinates were drawn on.
"""
# Instantiate the visualizer
visualizer = TSNEVisualizer(
ax, decompose, decompose_by, classes, colors, colormap, **kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
# Return the axes object on the visualizer
return visualizer.ax
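# Illustrative usage sketch (an assumption, not part of the library): the quick
# method above expects an already-vectorized corpus. The helper below shows one
# plausible call; the corpus/labels are placeholders, TfidfVectorizer is
# imported locally so the module's imports stay unchanged, and the corpus is
# assumed large enough for the default SVD (50 components) and TSNE perplexity.
def _tsne_quick_method_demo(corpus, labels=None):
    from sklearn.feature_extraction.text import TfidfVectorizer
    docs = TfidfVectorizer().fit_transform(corpus)
    # returns the matplotlib axes with the TSNE scatter plot drawn on it
    return tsne(docs, labels)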
##########################################################################
## TSNEVisualizer
##########################################################################
class TSNEVisualizer(TextVisualizer):
"""
Display a projection of a vectorized corpus in two dimensions using TSNE,
a nonlinear dimensionality reduction method that is particularly well
suited to embedding in two or three dimensions for visualization as a
scatter plot. TSNE is widely used in text analysis to show clusters or
groups of documents or utterances and their relative proximities.
TSNE will return a scatter plot of the vectorized corpus, such that each
point represents a document or utterance. The distance between two points
in the visual space is embedded using the probability distribution of
pairwise similarities in the higher dimensionality; thus TSNE shows
clusters of similar documents and the relationships between groups of
documents as a scatter plot.
TSNE can be used with either clustering or classification; by specifying
the ``classes`` argument, points will be colored based on their similar
traits. For example, by passing ``cluster.labels_`` as ``y`` in ``fit()``, all
points in the same cluster will be grouped together. This extends the
neighbor embedding with more information about similarity, and can allow
better interpretation of both clusters and classes.
For more, see https://lvdmaaten.github.io/tsne/
Parameters
----------
ax : matplotlib axes
The axes to plot the figure on.
decompose : string or None
A preliminary decomposition is often used prior to TSNE to make the
projection faster. Specify `"svd"` for sparse data or `"pca"` for
dense data. If decompose is None, the original data set will be used.
decompose_by : int
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
classes : list of strings
The names of the classes in the target, used to create a legend.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Sequential colormap for continuous target
kwargs : dict
Pass any additional keyword arguments to the TSNE transformer.
"""
def __init__(self, ax=None, decompose='svd', decompose_by=50, classes=None,
colors=None, colormap=None, **kwargs):
"""
Initialize the TSNE visualizer with visual hyperparameters.
"""
super(TSNEVisualizer, self).__init__(ax=ax, **kwargs)
# Visualizer parameters
self.classes_ = classes
self.n_instances_ = 0
# Visual Parameters
# TODO: Only colors currently works to select the colors of classes.
self.colors = colors
self.colormap = colormap
# TSNE Parameters
self.transformer_ = self.make_transformer(decompose, decompose_by, kwargs)
def make_transformer(self, decompose='svd', decompose_by=50, tsne_kwargs={}):
"""
Creates an internal transformer pipeline to project the data set into
2D space using TSNE, applying a pre-decomposition technique ahead of
embedding if necessary. This method will reset the transformer on the
class, and can be used to explore different decompositions.
Parameters
----------
decompose : string or None
A preliminary decomposition is often used prior to TSNE to make the
projection faster. Specify `"svd"` for sparse data or `"pca"` for
dense data. If decompose is None, the original data set will be used.
decompose_by : int
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
Returns
-------
transformer : Pipeline
Pipelined transformer for TSNE projections
"""
decompositions = {
'svd': TruncatedSVD,
'pca': PCA,
}
if decompose and decompose.lower() not in decompositions:
raise YellowbrickValueError(
"'{}' is not a valid decomposition, use {}, or None".format(
decompose, ", ".join(decompositions.keys())
)
)
# Create the pipeline steps
steps = []
# Add the pre-decomposition
if decompose:
klass = decompositions[decompose]
steps.append((decompose, klass(n_components=decompose_by)))
# Add the TSNE manifold
steps.append(('tsne', TSNE(n_components=2, **tsne_kwargs)))
# return the pipeline
return Pipeline(steps)
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the TSNE projection
since the visualization requires both X and an optional y value. The
fit method expects an array of numeric vectors, so text documents must
be vectorized before passing them to this method.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features representing the corpus of
vectorized documents to visualize with tsne.
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class. Often cluster labels are passed in to
color the documents in cluster space, so this method is used both
for classification and clustering methods.
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
# If we don't have classes already stored, store them.
if y and self.classes_ is None:
self.classes_ = [str(label) for label in set(y)]
# Fit our internal transformer and transform the data.
vecs = self.transformer_.fit_transform(X)
self.n_instances_ += vecs.shape[0]
# Draw the vectors
self.draw(vecs, y, **kwargs)
# Fit always returns self.
return self
def draw(self, points, target=None, **kwargs):
"""
Called from the fit method, this method draws the TSNE scatter plot,
from a set of decomposed points in 2 dimensions. This method also
accepts a third dimension, target, which is used to specify the colors
of each of the points. If the target is not specified, then the points
are plotted as a single cloud to show similar documents.
"""
# Create the axis if it doesn't exist
if self.ax is None: self.ax = plt.gca()
# Create the color mapping for the classes.
# TODO: Allow both colormap, listed colors, and palette definition
# See the FeatureVisualizer for more on this.
color_values = get_color_cycle()
classes = self.classes_ or [None]
colors = dict(zip(classes, color_values))
# Expand the points into vectors of x and y for scatter plotting,
# assigning them to their label if the label has been passed in.
# Additionally, filter classes not specified directly by the user.
series = defaultdict(lambda: {'x':[], 'y':[]})
if self.classes_: classes = frozenset(self.classes_)
if target:
for label, point in zip(target, points):
if self.classes_ and label not in classes:
continue
series[label]['x'].append(point[0])
series[label]['y'].append(point[1])
else:
for x,y in points:
series[None]['x'].append(x)
series[None]['y'].append(y)
# Plot the points
for label, points in series.items():
self.ax.scatter(points['x'], points['y'], c=colors[label], alpha=0.7, label=label)
def finalize(self, **kwargs):
"""
Finalize the drawing by adding a title and legend, and removing the
axes objects that do not convey information about TSNE.
"""
# Add a title
self.set_title(
"TSNE Projection of {} Documents".format(self.n_instances_)
)
# Remove the ticks
self.ax.set_yticks([])
self.ax.set_xticks([])
# Add the legend outside of the figure box.
if self.classes_:
box = self.ax.get_position()
self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
self.ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
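# Illustrative sketch (an assumption, not part of the library): as described in
# the class docstring, the visualizer is often combined with clustering so that
# documents are coloured by cluster label. KMeans, the placeholder label format
# and the already-vectorized corpus are assumptions made for this example.
def _tsne_cluster_demo_sketch(docs_vectorized, n_clusters=5):
    from sklearn.cluster import KMeans
    clusters = KMeans(n_clusters=n_clusters).fit(docs_vectorized)
    viz = TSNEVisualizer()
    # colour each document by its cluster assignment
    viz.fit(docs_vectorized, ["c{}".format(label) for label in clusters.labels_])
    viz.finalize()
    return viz.ax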
| apache-2.0 |
NunoEdgarGub1/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
Hash--/ICRH | WEST_design/matching_double_conjugateT_VSWR_vs_Rc.py | 2 | 4280 | # -*- coding: utf-8 -*-
"""
Created on 27/03/2015
Matching a double conjugate-T and checking the load resilience vs
the coupling resistance.
@author: J.Hillairet
"""
import skrf as rf
from antenna.conjugate_t import ConjugateT
from antenna.resonant_loop import ResonantDoubleLoop
from antenna.topica import *
from matplotlib.pylab import *
f_match = 55e6 # matching frequency
z_match = [29.74 - 0*1j, 29.74 - 0*1j] # matching impedance target
power_input = [1.5e6, 1.5e6]
phase_input = [0.0, pi]
########
bridge = rf.io.hfss_touchstone_2_network(\
'./data/Sparameters/WEST/WEST_ICRH_bridge.s3p', f_unit='MHz')
impedance_transformer = rf.io.hfss_touchstone_2_network(\
'./data/Sparameters/WEST/WEST_ICRH_impedance-transformer.s2p', f_unit='MHz')
window = rf.io.hfss_touchstone_2_network(\
'./data/Sparameters/WEST/WEST_ICRH_window.s2p', f_unit='MHz')
idx_f = np.argmin(np.abs(bridge.frequency.f - f_match))
def TOPICA_2_network(filename, z0):
proto9 = TopicaResult(filename, z0)
# we re-set the characteristic impedance in order to match the bridge characteristic impedance
# TODO: for future TOPICA results, check that the CAD model characteristic impedance is coherent with the bridge model used.
proto9.z0 = [z0]*4
plasma_TOPICA = proto9.to_skrf_network(bridge.frequency, name='plasma')
return(plasma_TOPICA)
# # RAW data from TOPICA
#plasma = TOPICA_2_network('./data/TOPICA/ToreSupra_WEST/L-mode/TSproto12/Zs_TSproto12_50MHz_Profile1.txt', \
# z0=46.7)
# TOPICA matrices corrected from deembedding
plasma = rf.io.hfss_touchstone_2_network(\
'./data/Sparameters/WEST/plasma_from_TOPICA/S_TSproto12_55MHz_Profile8.s4p')
plasma.frequency = bridge.frequency
plasma.s = np.tile(plasma.s, (len(plasma.frequency), 1, 1))
plasma.z0 = np.tile(plasma.z0, (len(plasma.frequency),1))
CT1 = ConjugateT(bridge, impedance_transformer, window)
CT2 = ConjugateT(bridge, impedance_transformer, window)
RDL = ResonantDoubleLoop(CT1, CT2, plasma)
RDL.match(power_input, phase_input, f_match, z_match)
# Get results
act_vswr = RDL.get_vswr_active(power_input, phase_input)
act_S = RDL.get_s_active(power_input, phase_input)
I_plasma, V_plasma = RDL.get_currents_and_voltages(power_input, phase_input)
print(RDL.get_vswr_active(power_input, phase_input)[idx_f])
# Now the antenna has been matched on a specific antenna loading,
# we change this load and look at the VSWR.
Rc = np.array([1.06,1.34,1.57,1.81,2.05,2.45,2.65,2.91])
VSWR = []
for idx_plasma in range(1,9):
plasma = rf.io.hfss_touchstone_2_network(\
'./data/Sparameters/WEST/plasma_from_TOPICA/S_TSproto12_55MHz_Profile'+str(idx_plasma)+'.s4p')
plasma.frequency = bridge.frequency
plasma.s = np.tile(plasma.s, (len(plasma.frequency), 1, 1))
plasma.z0 = np.tile(plasma.z0, (len(plasma.frequency),1))
_RDL = ResonantDoubleLoop(CT1, CT2, plasma, C=RDL.C)
VSWR.append(_RDL.get_vswr_active(power_input, phase_input)[idx_f])
VSWR = np.array(VSWR)
figure(1)
clf()
fill_between(Rc, VSWR[:,0], VSWR[:,1], lw=2, alpha=0.2)
xlabel('Rc [$\Omega$]', fontsize=14)
ylabel('VSWR', fontsize=14)
grid(True)
xticks(fontsize=14)
yticks(fontsize=14)
# Match on a different point
plasma = rf.io.hfss_touchstone_2_network(\
'./data/Sparameters/WEST/plasma_from_TOPICA/S_TSproto12_55MHz_Profile1.s4p')
plasma.frequency = bridge.frequency
plasma.s = np.tile(plasma.s, (len(plasma.frequency), 1, 1))
plasma.z0 = np.tile(plasma.z0, (len(plasma.frequency),1))
RDL = ResonantDoubleLoop(CT1, CT2, plasma)
RDL.match(power_input, phase_input, f_match, z_match)
VSWR = []
for idx_plasma in range(1,9):
plasma = rf.io.hfss_touchstone_2_network(\
'./data/Sparameters/WEST/plasma_from_TOPICA/S_TSproto12_55MHz_Profile'+str(idx_plasma)+'.s4p')
plasma.frequency = bridge.frequency
plasma.s = np.tile(plasma.s, (len(plasma.frequency), 1, 1))
plasma.z0 = np.tile(plasma.z0, (len(plasma.frequency),1))
_RDL = ResonantDoubleLoop(CT1, CT2, plasma, C=RDL.C)
VSWR.append(_RDL.get_vswr_active(power_input, phase_input)[idx_f])
VSWR = np.array(VSWR)
fill_between(Rc, VSWR[:,0], VSWR[:,1], lw=2, alpha=0.2, color='g')
axhline(y=2, color='k', lw=2)
savefig('WEST_ICRH_VSWR_vs_Rc_ideal_matching.png', dpi=300) | mit |
amueller/pystruct | examples/plot_binary_svm.py | 4 | 3222 | """
==================
Binary SVM as SSVM
==================
Example of training binary SVM using n-slack QP, 1-slack QP, SGD and
SMO (libsvm). Our 1-slack QP does surprisingly well.
There are many parameters to tune and we can make 1-slack as good as the rest
for the price of higher runtime; we can also try to make the others faster.
We don't really have a chance to beat LibSVM but that's ok ;)
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.svm import SVC
from pystruct.models import BinaryClf
from pystruct.learners import (NSlackSSVM, OneSlackSSVM,
SubgradientSSVM)
# do a binary digit classification
digits = load_digits()
X, y = digits.data, digits.target
# make binary task by doing odd vs even numbers
y = y % 2
# code as +1 and -1
y = 2 * y - 1
X /= X.max()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
pbl = BinaryClf()
n_slack_svm = NSlackSSVM(pbl, C=10, batch_size=-1)
one_slack_svm = OneSlackSSVM(pbl, C=10, tol=0.1)
subgradient_svm = SubgradientSSVM(pbl, C=10, learning_rate=0.1, max_iter=100,
batch_size=10)
# we add a constant 1 feature for the bias
X_train_bias = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_test_bias = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
# n-slack cutting plane ssvm
start = time()
n_slack_svm.fit(X_train_bias, y_train)
time_n_slack_svm = time() - start
acc_n_slack = n_slack_svm.score(X_test_bias, y_test)
print("Score with pystruct n-slack ssvm: %f (took %f seconds)"
% (acc_n_slack, time_n_slack_svm))
## 1-slack cutting plane ssvm
start = time()
one_slack_svm.fit(X_train_bias, y_train)
time_one_slack_svm = time() - start
acc_one_slack = one_slack_svm.score(X_test_bias, y_test)
print("Score with pystruct 1-slack ssvm: %f (took %f seconds)"
% (acc_one_slack, time_one_slack_svm))
# online subgradient ssvm
start = time()
subgradient_svm.fit(X_train_bias, y_train)
time_subgradient_svm = time() - start
acc_subgradient = subgradient_svm.score(X_test_bias, y_test)
print("Score with pystruct subgradient ssvm: %f (took %f seconds)"
% (acc_subgradient, time_subgradient_svm))
libsvm = SVC(kernel='linear', C=10)
start = time()
libsvm.fit(X_train, y_train)
time_libsvm = time() - start
acc_libsvm = libsvm.score(X_test, y_test)
print("Score with sklearn and libsvm: %f (took %f seconds)"
% (acc_libsvm, time_libsvm))
# plot the results
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].bar(range(4), [time_n_slack_svm, time_one_slack_svm,
time_subgradient_svm, time_libsvm])
ax[0].set_xticks(np.arange(4) + .5)
ax[0].set_xticklabels(["n-slack", "1-slack", "subgradient", "libsvm"])
ax[0].set_ylabel("runtime (s)")
ax[0].set_title("Run times")
ax[1].set_title("Accuracies")
ax[1].bar(range(4), [acc_n_slack, acc_one_slack,
acc_subgradient, acc_libsvm])
ax[1].set_xticks(np.arange(4) + .5)
ax[1].set_xticklabels(["n-slack", "1-slack", "subgradient", "libsvm"])
ax[1].set_ylim((.8, 1))
ax[1].set_ylabel("accuracy")
plt.show()
| bsd-2-clause |
sonalranjit/GOCE_SECS-EICS | Single_grid_plotter/single_grid_plot.py | 1 | 2806 | __author__ = 'sonal'
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
def polar_plot(grid, col, title,filename):
z = grid[:,col]
plt.figure(figsize=(18,18))
ax = plt.gca()
#polar projection
m = Basemap(projection='npaeqd',boundinglat=30,lon_0=-100.,resolution='l')
# Lambert projection
#m = Basemap(width=8000000, height=8000000, resolution='l', projection='lcc',\
# lat_0=60,lon_0=-100.)
m.drawcoastlines()
m.drawparallels(np.arange(-80.,81,20.),labels=[1,0,0,0],fontsize=10)
m.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],fontsize=10)
x,y =m(grid[:,7],grid[:,6])
sc = m.scatter(x,y,s=25,c=z,marker='.',cmap=cm.jet,alpha=0.9,edgecolors='none',vmin=min(z),vmax=max(z))
#sc = m.scatter(x,y,s=abs(z)/300,c=z/300,marker=',',cmap=cm.jet,alpha=0.9,edgecolors='none')
plt.title(title)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb1 = plt.colorbar(sc,cax=cax)
if col == 11 or col == 12:
cb1.set_label("mA/m",fontsize=18)
else:
cb1.set_label("A",fontsize=18)
plt.savefig(filename,bbox_inches='tight',pad_inches=0.2)
#plt.show()
def filter_grid(grid,col,high,low):
high_idx = np.where(grid[:,col]<high)[0]
high_grid = grid[high_idx,:]
low_idx = np.where(high_grid[:,col]>low)[0]
fil_grid = high_grid[low_idx,:]
return fil_grid
asc_grid = np.loadtxt('SECS_and_EICS_GRF_asc.txt')
des_grid = np.loadtxt('SECS_and_EICS_GRF_des.txt')
#Ascending
#Z: -30000:30000
#X: -15000:15000
#Y: -15000"15000
#Descending
#X: -15000:15000
#Y: -15000:15000
#Z: -35000:35000
asc_grid_x = filter_grid(asc_grid,11,15000,-15000)
asc_grid_y = filter_grid(asc_grid,12,15000,-15000)
asc_grid_z = filter_grid(asc_grid,13,30000,-30000)
des_grid_x = filter_grid(des_grid,11,15000,-15000)
des_grid_y = filter_grid(des_grid,12,15000,-15000)
des_grid_z = filter_grid(des_grid,13,30000,-30000)
polar_plot(asc_grid_x,11,'EICS Along-track in GRF (Ascending)','EICS_X_GRF_asc.png')
polar_plot(asc_grid_y,12,'EICS Cross-track in GRF (Ascending)','EICS_Y_GRF_asc.png')
polar_plot(asc_grid_z,13,'EICS Radial-track in GRF (Ascending)','EICS_Z_GRF_asc.png')
polar_plot(des_grid_x,11,'EICS Along-track in GRF (Descending)','EICS_X_GRF_des.png')
polar_plot(des_grid_y,12,'EICS Cross-track in GRF (Descending)','EICS_Y_GRF_des.png')
polar_plot(des_grid_z,13,'EICS Radial-track in GRF (Descending)','EICS_Z_GRF_des.png')
'''plt.plot(new_grid[:,14],new_grid[:,11],'.')
plt.title('EICS v component GRF Ascending Track Trace')
plt.ylabel('mA/m')
plt.show()
#plt.savefig('SECS_GRF_asc_trace.png',bbox_inches='tight',pad_inches=0.2)''' | gpl-2.0 |
wagavulin/arrow | python/pyarrow/__init__.py | 1 | 8314 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# flake8: noqa
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
try:
# This code is duplicated from setup.py to avoid a dependency on each
# other.
def parse_version(root):
from setuptools_scm import version_from_scm
import setuptools_scm.git
describe = (setuptools_scm.git.DEFAULT_DESCRIBE +
" --match 'apache-arrow-[0-9]*'")
# Strip catchall from the commandline
describe = describe.replace("--match *.*", "")
version = setuptools_scm.git.parse(root, describe)
if not version:
return version_from_scm(root)
else:
return version
import setuptools_scm
__version__ = setuptools_scm.get_version('../', parse=parse_version)
except (ImportError, LookupError):
__version__ = None
from pyarrow.lib import cpu_count, set_cpu_count
from pyarrow.lib import (null, bool_,
int8, int16, int32, int64,
uint8, uint16, uint32, uint64,
time32, time64, timestamp, date32, date64,
float16, float32, float64,
binary, string, decimal128,
list_, struct, union, dictionary, field,
type_for_alias,
DataType, NAType,
Field,
Schema,
schema,
Array, Tensor,
array, chunked_array, column,
from_numpy_dtype,
NullArray,
NumericArray, IntegerArray, FloatingPointArray,
BooleanArray,
Int8Array, UInt8Array,
Int16Array, UInt16Array,
Int32Array, UInt32Array,
Int64Array, UInt64Array,
ListArray, UnionArray,
BinaryArray, StringArray,
FixedSizeBinaryArray,
DictionaryArray,
Date32Array, Date64Array,
TimestampArray, Time32Array, Time64Array,
Decimal128Array, StructArray,
ArrayValue, Scalar, NA,
BooleanValue,
Int8Value, Int16Value, Int32Value, Int64Value,
UInt8Value, UInt16Value, UInt32Value, UInt64Value,
HalfFloatValue, FloatValue, DoubleValue, ListValue,
BinaryValue, StringValue, FixedSizeBinaryValue,
DecimalValue,
Date32Value, Date64Value, TimestampValue)
# ARROW-1683: Remove after 0.8.0?
from pyarrow.lib import TimestampType
# Buffers, allocation
from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
compress, decompress, allocate_buffer)
from pyarrow.lib import (MemoryPool, total_allocated_bytes,
set_memory_pool, default_memory_pool,
log_memory_allocations)
from pyarrow.lib import (HdfsFile, NativeFile, PythonFile,
FixedSizeBufferWriter,
BufferReader, BufferOutputStream,
OSFile, MemoryMappedFile, memory_map,
create_memory_map, have_libhdfs, have_libhdfs3,
MockOutputStream)
from pyarrow.lib import (ChunkedArray, Column, RecordBatch, Table,
concat_tables)
from pyarrow.lib import (ArrowException,
ArrowKeyError,
ArrowInvalid,
ArrowIOError,
ArrowMemoryError,
ArrowNotImplementedError,
ArrowTypeError,
ArrowSerializationError,
PlasmaObjectExists)
# Serialization
from pyarrow.lib import (deserialize_from, deserialize,
deserialize_components,
serialize, serialize_to, read_serialized,
SerializedPyObject, SerializationContext,
SerializationCallbackError,
DeserializationCallbackError)
from pyarrow.filesystem import FileSystem, LocalFileSystem
from pyarrow.hdfs import HadoopFileSystem
import pyarrow.hdfs as hdfs
from pyarrow.ipc import (Message, MessageReader,
RecordBatchFileReader, RecordBatchFileWriter,
RecordBatchStreamReader, RecordBatchStreamWriter,
read_message, read_record_batch, read_schema,
read_tensor, write_tensor,
get_record_batch_size, get_tensor_size,
open_stream,
open_file,
serialize_pandas, deserialize_pandas)
localfs = LocalFileSystem.get_instance()
from pyarrow.serialization import (default_serialization_context,
register_default_serialization_handlers,
register_torch_serialization_handlers)
import pyarrow.types as types
# Entry point for starting the plasma store
def _plasma_store_entry_point():
"""Entry point for starting the plasma store.
This can be used by invoking e.g.
``plasma_store -s /tmp/plasma -m 1000000000``
from the command line and will start the plasma_store executable with the
given arguments.
"""
import os
import pyarrow
import sys
plasma_store_executable = os.path.join(pyarrow.__path__[0], "plasma_store")
os.execv(plasma_store_executable, sys.argv)
# ----------------------------------------------------------------------
# Deprecations
from pyarrow.util import _deprecate_api # noqa
frombuffer = _deprecate_api('frombuffer', 'py_buffer', py_buffer, '0.9.0')
# ----------------------------------------------------------------------
# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
# wheels)
def get_include():
"""
Return absolute path to directory containing Arrow C++ include
headers. Similar to numpy.get_include
"""
import os
return os.path.join(os.path.dirname(__file__), 'include')
def get_libraries():
"""
Return list of library names to include in the `libraries` argument for C
or Cython extensions using pyarrow
"""
return ['arrow_python']
def get_library_dirs():
"""
Return lists of directories likely to contain Arrow C++ libraries for
linking C or Cython extensions using pyarrow
"""
import os
import sys
package_cwd = os.path.dirname(__file__)
library_dirs = [package_cwd]
if sys.platform == 'win32':
# TODO(wesm): Is this necessary, or does setuptools within a conda
# installation add Library\lib to the linker path for MSVC?
site_packages, _ = os.path.split(package_cwd)
python_base_install, _ = os.path.split(site_packages)
library_dirs.append(os.path.join(python_base_install,
'Library', 'lib'))
return library_dirs
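# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the three helpers
# above are typically combined when declaring a setuptools Extension that
# links against the bundled Arrow libraries. The extension name and source
# file this would be attached to are hypothetical.
def _example_extension_kwargs():
    """Illustrative only: keyword arguments for a setuptools.Extension."""
    return dict(
        include_dirs=[get_include()],
        libraries=get_libraries(),
        library_dirs=get_library_dirs(),
    )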
| apache-2.0 |
Thomsen22/MissingMoney | Peak Load Reserve - 24 Bus/dayahead_optclass.py | 1 | 10612 | # Python standard modules
import numpy as np
import gurobipy as gb
import networkx as nx
from collections import defaultdict
import pandas as pd
import math
# Own modules
import data_24bus as data
class expando(object):
    '''
    An empty helper class used as a flexible container for the data,
    variables and constraints of the Day Ahead market clearing below.
    '''
pass
class DayAhead:
def __init__(self):
self.data = expando()
self.variables = expando()
self.constraints = expando()
self._load_data()
self._build_model()
def optimize(self):
self.model.optimize()
m = self.model
generators = self.data.generators
times = self.data.times
gprod = self.variables.gprod
df_genprod = pd.DataFrame(index = times, data = {g: [self.variables.gprod[g,t].x for t in times] for g in generators.index})
dict_genprod = {}
for t in times:
for g in np.arange(len(generators.index)):
dict_genprod[df_genprod.columns[g], t] = df_genprod.ix[df_genprod.index[t], df_genprod.columns[g]]
hydroconstr = {}
for t in times:
for g in generators.index:
if generators.primaryfuel[g] == 'Hydro':
hydroconstr[g, t] = dict_genprod[g, t]
self.constraints.hydroconstr = {}
for t in times:
for g in generators.index:
if generators.primaryfuel[g] == "Hydro":
self.constraints.hydroconstr[g, t] = m.addConstr(gprod[g, t], gb.GRB.EQUAL, hydroconstr[g, t])
self.model.update()
self.model.reset()
self.model.optimize()
def _load_data(self):
self.data.consumption = data.load()
windproduction = data.windprod()
self.data.windproduction = windproduction
self.data.solarproduction = data.solarprod()
self.data.generators = data.load_generators()
self.data.network = data.load_network()
self.data.nodes = self.data.network.nodes()
self.data.times = np.arange(len(self.data.consumption.index)) # Consumption profile determines times
self.data.country = nx.get_node_attributes(self.data.network, 'country')
self.data.hydrocoeff = 0.473
# Assigning each node to a country (price-area, zone)
country = nx.get_node_attributes(self.data.network, 'country')
country = pd.Series(self.data.country, name='Zone')
country = country.reset_index()
country = country.rename(columns={'index': 'Node'})
self.data.countries = country
# Using defaultdict
zones_nodes = country[['Zone','Node']].values.tolist()
self.data.nodes_for_zones = defaultdict(list)
        for Zone, Node in zones_nodes:
            self.data.nodes_for_zones[Zone].append(Node)
# Assigning load to each node and time (and zonal consumption)
times = self.data.times
nodes = self.data.nodes
consumption = self.data.consumption
windproduction = self.data.windproduction
solarproduction = self.data.solarproduction
self.data.zones = ['Z1', 'Z2', 'Z3']
zones = self.data.zones
self.data.nodalconsumption = {}
for t in times:
for n in np.arange(len(nodes)):
self.data.nodalconsumption[consumption.columns[n], t] = consumption.ix[consumption.index[t], consumption.columns[n]]
self.data.zonalconsumption = {}
for t in times:
for z in zones:
self.data.zonalconsumption[z,t] = sum(self.data.nodalconsumption[n,t] for n in self.data.nodes_for_zones[z])
self.data.df_zonalconsumption = pd.DataFrame(index = times, data = {z: [self.data.zonalconsumption[z,t] for t in times] for z in zones})
self.data.windprod = {}
for t in times:
for n in np.arange(len(nodes)):
self.data.windprod[windproduction.columns[n], t] = windproduction.ix[windproduction.index[t], windproduction.columns[n]]
self.data.solarprod = {}
for t in times:
for n in np.arange(len(nodes)):
self.data.solarprod[solarproduction.columns[n], t] = solarproduction.ix[solarproduction.index[t], solarproduction.columns[n]]
self.data.generatorcost = {}
for g in self.data.generators.index:
self.data.generatorcost[g] = self.data.generators.lincost[g]
        # Build a dict mapping each country/zone to the list of its generators
country_generator = self.data.generators[['country','name']].values.tolist()
self.data.gens_for_country = defaultdict(list)
for country, generator in country_generator:
self.data.gens_for_country[country].append(generator)
# Lines and lineinfo
self.data.lines = [('Z1', 'Z2'), ('Z2', 'Z3')]
self.data.lineinfo = {}
self.data.lineinfo[('Z1', 'Z2')] = {'linecapacity': 875, 'x': 1, 'otherinfo': []} #875
self.data.lineinfo[('Z2', 'Z3')] = {'linecapacity': 1500, 'x': 1, 'otherinfo': []} #1500
# VOLL assigned to each time
self.data.VoLL = {}
for t in times:
self.data.VoLL[t] = 3000 # Price cap is 3000 € in Nordpool Spot
def _build_model(self):
self.model = gb.Model()
self._build_variables()
self._build_objective()
self._build_constraints()
def _build_variables(self):
m = self.model
times = self.data.times
lines = self.data.lines
lineinfo = self.data.lineinfo
generators = self.data.generators
windproduction = self.data.windprod
solarproduction = self.data.solarprod
nodes_for_zones = self.data.nodes_for_zones
zones = self.data.zones
nodalconsumption = self.data.nodalconsumption
# Capacity on generators
self.variables.gprod = {}
for t in times:
for g in generators.index:
self.variables.gprod[g, t] = m.addVar(lb = 0, ub = generators.capacity[g])
# Loadshed assigned to each zone in the system at each time
self.variables.loadshed = {}
for t in times:
for z in zones:
self.variables.loadshed[z, t] = m.addVar(lb = 0, ub = sum(nodalconsumption[n,t] for n in nodes_for_zones[z]))
# Wind production assigned to each node in the system at each time
self.variables.windprod = {}
for t in times:
for z in zones:
self.variables.windprod[z, t] = m.addVar(lb = 0, ub = sum(windproduction[n,t] for n in nodes_for_zones[z]))
# Solar production assigned to each node in the system at each time
self.variables.solarprod = {}
for t in times:
for z in zones:
self.variables.solarprod[z, t] = m.addVar(lb = 0, ub = sum(solarproduction[n,t] for n in nodes_for_zones[z]))
# Export variable from each node at each time, (-infinity < export < infinity)
self.variables.export = {}
for t in times:
for z in zones:
self.variables.export[z, t] = m.addVar(lb = -math.inf, ub = math.inf)
# The linelimits between zones are inserted
self.variables.linelimit = {}
for t in times:
for l in lines: # New lines, check from beginning!
self.variables.linelimit[l, t] = m.addVar(lb=-lineinfo[l]['linecapacity'], ub=lineinfo[l]['linecapacity'])
m.update()
def _build_objective(self):
times = self.data.times
zones = self.data.zones
generators = self.data.generators
loadshed = self.variables.loadshed
gprod = self.variables.gprod
generatorcost = self.data.generatorcost
VoLL = self.data.VoLL
self.model.setObjective(
gb.quicksum(generatorcost[g] * gprod[g, t] for g in generators.index for t in times)
+ gb.quicksum(VoLL[t] * loadshed[z, t] for z in zones for t in times)
,gb.GRB.MINIMIZE)
def _build_constraints(self):
m = self.model
times = self.data.times
zones = self.data.zones
lines = self.data.lines
linelimit = self.variables.linelimit
Load = self.data.zonalconsumption
loadshed = self.variables.loadshed
gprod = self.variables.gprod
gens_for_country = self.data.gens_for_country
export = self.variables.export
windprod = self.variables.windprod
solarprod = self.variables.solarprod
generators = self.data.generators
gprod = self.variables.gprod
hydrocoeff = self.data.hydrocoeff
# Power Balance constraint in each zone at each time
self.constraints.powerbalance = {}
for z in zones:
for t in times:
self.constraints.powerbalance[z, t] = m.addConstr(
gb.quicksum(gprod[g, t] for g in gens_for_country[z])
+ windprod[z, t] + solarprod[z, t]
,gb.GRB.EQUAL,
(Load[z, t] - loadshed[z, t]) + export[z, t])
# Export constraint at each node at each time
self.constraints.exporting = {}
for t in times:
for z in zones:
self.constraints.exporting[z, t] = m.addConstr(
export[z, t], gb.GRB.EQUAL,
gb.quicksum(linelimit[l, t] for l in lines if l[0] == z) - gb.quicksum(linelimit[l, t] for l in lines if l[1] == z))
# Hydro constraint
self.constraints.hydro = {}
for g in generators.index:
if generators.primaryfuel[g] == "Hydro":
self.constraints.hydro[g] = m.addConstr(
gb.quicksum(gprod[g, t] for t in times), gb.GRB.LESS_EQUAL,
hydrocoeff * generators.capacity[g] * len(times))
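# Hedged usage sketch (not part of the original file): builds the market
# clearing model and runs the two-stage optimization, assuming the input
# files referenced in data_24bus are available alongside this script.
if __name__ == '__main__':
    dayahead = DayAhead()
    dayahead.optimize()
    print('Day-ahead objective value:', dayahead.model.ObjVal)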
| gpl-3.0 |
NDKoehler/DataScienceBowl2017_7th_place | dsb3_networks/classification/luna_resnet2D/config.py | 1 | 2927 | from collections import defaultdict
from datetime import datetime
import json
import tensorflow as tf
import os, sys
import pandas as pd
#config dic
H = defaultdict(lambda: None)
#All possible config options:
H['optimizer'] = 'MomentumOptimizer'#'RMSPropOptimizer'
H['learning_rate'] = 0.001
H['momentum'] = 0.9 #0.99
H['kernel_num'] = 16 #32
H['dropout_keep_prob'] = 1.0
H['gpu_fraction'] = 0.9
H['num_classes'] = 2
H['model_name'] = 'resnet2D'
H['pretrained_checkpoint_dir'] = ''
H['output_dir'] = 'output_dir/gold_prio3_plane_mil0' #cross_crop_retrain_zrot
H['predictions_dir'] = ''
H['allow_soft_placement'] = True
H['log_device_placement'] = False
H['max_steps'] = 15#*2#100000
H['MOVING_AVERAGE_DECAY'] = 0.9
H['BATCH_NORM_CENTER'] = True
H['BATCH_NORM_SCALE'] = True
H['weights_initializer'] = 'xavier_initializer' #'xavier_initializer', 'xavier_initializer_conv2d', 'truncated_normal_initializer'
H['gpus'] = [0]
H['summary_step'] = 10
# list iterator
#H['train_lst'] = '../data/joined/tr_concat.lst'
#H['val_lst'] = '../data/multiview-2/va.lst'
H['candidate_mode'] = True
H['importance_sampling'] = True
H['importance_dict'] = {0: 0.2, 1:1.0}
#H['importance_dict'] = {0: 1.0, 1:1.0}
H['train_lst'] = '../../datapipeline_final/LUNA16_0/interpolate_candidates/tr_candidates_binary.lst' #tr_candidates_binary
H['val_lst'] = '../../datapipeline_final/LUNA16_0/interpolate_candidates/va_candidates_binary.lst' #va_candidates_binary
# images
# in_image_shapes[1:] must be equal to len of crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax
H['in_image_shape'] = [1, 64, 64, 64, 2] #256
# not working #H['crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax'] = [False,False, False,False,False,False] # Default = False or None
H['image_shape'] = [1, 3, 64, 64, 2]
H['label_shape'] = [1] #256
H['batch_size'] = 32
H['plane_mil'] = False
# crossed axes options - cross is centrally cropped -> layers are stacked in z-dim
H['num_crossed_layers'] = 1
H['crossed_axes'] = [0,1,2] #False
H['rand_drop_planes'] = 0
# y and x image_shape must be equal -> z has same shape!!!
# you can crop if the z, y and x entries in image_shape are equal and smaller than those in in_image_shape
#iterator settings
H['load_in_ram'] = True
# due to the time-consuming operation and quality loss, only rotation around one randomly chosen axis is processed
H['rand_rot_axes'] = [0]#,1,2] # 0: z, 1: y, 2: x (attention: x and y rotation lasts long)
H['rand_rot'] = True
H['min_rot_angle'] = -45 #degree
H['max_rot_angle'] = 45 #degree
H['degree_90_rot'] = True
H['rand_mirror_axes'] = [0,1,2] # 0: z, 1: y, 2: x else False
H['rand_cropping_ZminZmaxYminYmaxXminXmax'] = [31, 32,False,False,False,False] # crop within given range # default False: full range
H['save_step'] = 3 # saving checkpoint
H['tr_num_examples'] = len(pd.read_csv(H['train_lst'], header=None, sep='\t'))
H['va_num_examples'] = len(pd.read_csv(H['val_lst'], header=None, sep='\t'))
| mit |
sichenucsd/caffe_si | examples/web_demo/app.py | 10 | 7400 | import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image as PILImage
import cStringIO as StringIO
import urllib
import caffe
import exifutil
REPO_DIRNAME = os.path.abspath(os.path.dirname(__file__) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
string_buffer = StringIO.StringIO(
urllib.urlopen(imageurl).read())
image = caffe.io.load_image(string_buffer)
except Exception as err:
# For any exception we encounter in reading the image, we will just
# not continue.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
image = exifutil.open_oriented_im(filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(image)
)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
image_pil = PILImage.fromarray((255 * image).astype('uint8'))
image_pil = image_pil.resize((256, 256))
string_buf = StringIO.StringIO()
image_pil.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
def allowed_file(filename):
return (
'.' in filename and
filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS
)
class ImagenetClassifier(object):
default_args = {
'model_def_file': (
'{}/examples/imagenet/imagenet_deploy.prototxt'.format(REPO_DIRNAME)),
'pretrained_model_file': (
'{}/examples/imagenet/caffe_reference_imagenet_model'.format(REPO_DIRNAME)),
'mean_file': (
'{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
'class_labels_file': (
'{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
'bet_file': (
'{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
}
for key, val in default_args.iteritems():
if not os.path.exists(val):
raise Exception(
"File for {} is missing. Should be at: {}".format(key, val))
default_args['image_dim'] = 227
default_args['gpu_mode'] = True
def __init__(self, model_def_file, pretrained_model_file, mean_file,
class_labels_file, bet_file, image_dim, gpu_mode=False):
logging.info('Loading net and associated files...')
self.net = caffe.Classifier(
model_def_file, pretrained_model_file, input_scale=255,
image_dims=(image_dim, image_dim), gpu=gpu_mode,
mean_file=mean_file, channel_swap=(2, 1, 0)
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort('synset_id')['name'].values
self.bet = cPickle.load(open(bet_file))
# A bias to prefer children nodes in single-chain paths
# I am setting the value to 0.1 as a quick, simple model.
# We could use better psychological models here...
self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1
def classify_image(self, image):
try:
starttime = time.time()
scores = self.net.predict([image], oversample=True).flatten()
endtime = time.time()
indices = (-scores).argsort()[:5]
predictions = self.labels[indices]
# In addition to the prediction text, we will also produce
# the length for the progress bar visualization.
meta = [
(p, '%.5f' % scores[i])
for i, p in zip(indices, predictions)
]
logging.info('result: %s', str(meta))
# Compute expected information gain
expected_infogain = np.dot(
self.bet['probmat'], scores[self.bet['idmapping']])
expected_infogain *= self.bet['infogain']
# sort the scores
infogain_sort = expected_infogain.argsort()[::-1]
bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
for v in infogain_sort[:5]]
logging.info('bet result: %s', str(bet_result))
return (True, meta, bet_result, '%.3f' % (endtime - starttime))
except Exception as err:
logging.info('Classification error: %s', err)
return (False, 'Something went wrong when classifying the '
'image. Maybe try another one?')
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
opts, args = parser.parse_args()
# Initialize classifier
app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
| bsd-2-clause |
Shen-Lab/cNMA | Software/helperScripts/ModeSpectrumVisualizer.py | 1 | 17308 | '''
Created on Jun 2, 2014
@author: oliwa
'''
import glob
import argparse
import sys
import os
import numpy as np
from scriptutils import mkdir_p, customCopytree, getFullPathOfURI, makeStringEndWith, runBashCommand
from prody.dynamics.functions import loadModel
from collections import OrderedDict
from prody.proteins.pdbfile import parsePDB, writePDB
from prody.measure.measure import calcDeformVector
from prody.dynamics.compare import calcOverlap
from prody.dynamics.mode import Vector
from prody.proteins.compare import matchTNMAChains
from Hungarian import Hungarian
from prody.dynamics.editing import extendModel, sliceModel
import matplotlib.pyplot as plt
def main():
""" Visualizes normal modes memberships of an NMA experiment in a rectangle, where columns indicate the
membership (blue receptor, red ligand) and the tone of the color shows a linear relationship towards the overlap
towards the true deformation vector. """
parser = argparse.ArgumentParser(description='Visualizes normal modes memberships of an NMA experiment in a rectangle, where columns indicate the membership (blue receptor, red ligand) and the tone of the color shows a linear relationship towards the overlap towards the true deformation vector.')
parser.add_argument('resultsPath', help='Path to the experimental resultsfolder')
parser.add_argument('--extractNPZ', action="store_true", help='The NMA models are in a *.npz.tar.gz file. If this is not set, the input is expected in a *.npz subfolder in each experiment results folder')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
resultsPath = makeStringEndWith(getFullPathOfURI(args.resultsPath), "/")
folders = glob.glob(resultsPath+"*")
if args.extractNPZ:
extractNPZs(folders)
getAllModeInformation(folders, resultsPath)
def getAllModeInformation(folders, resultsPath):
fromMode = 12
toMode = 42
allModeMemberships = OrderedDict()
allModeOverlaps = OrderedDict()
for resultFolder in folders:
# load models
NPZfolder = glob.glob(resultFolder+"/*anms.npz")[0]
NPZ_reference = glob.glob(NPZfolder+"/*reference_ANM.anm.npz")[0]
NPZ_counterpart = glob.glob(NPZfolder+"/*anm_counterpart.anm.npz")[0]
NPZ_complex = glob.glob(NPZfolder+"/*anm_complex.anm.npz")[0]
anm_reference = loadModel(NPZ_reference)
anm_counterpart = loadModel(NPZ_counterpart)
anm_complex = loadModel(NPZ_complex)
assert anm_reference.getArray().shape[0] + anm_counterpart.getArray().shape[0] == anm_complex.getArray().shape[0]
# load resolution
resolution = getResolution(folders)
# load pdbs and deformation vector
unboundComplex = parsePDB(glob.glob(resultFolder+"/pdbs/*ucomplex.pdb")[0])
boundComplex = parsePDB(glob.glob(resultFolder+"/pdbs/*bcomplex.pdb")[0])
overallMatchComplex = getOverallMatch(unboundComplex, boundComplex, resolution)
defvec = calcDeformVector(overallMatchComplex[0], overallMatchComplex[1])
# get sliced ANM
anm_complex_slc = getSlicedANM(unboundComplex, overallMatchComplex[0], anm_complex)
# get mode memberships
proteinTitle = os.path.basename(resultFolder)
modeMemberships = getModeMemberships(anm_complex.getArray(), anm_reference.getArray().shape[0], anm_counterpart.getArray().shape[0])
allModeMemberships[proteinTitle] = modeMemberships
# get mode overlaps
modeOverlaps = getModeOverlaps(anm_complex_slc[0].getArray(), defvec)
allModeOverlaps[proteinTitle] = modeOverlaps
visualizeModeMemberships(allModeMemberships, allModeOverlaps, fromMode, toMode, resultsPath)
def visualizeModeMemberships(allModeMemberships, allModeOverlaps, fromMode, toMode, resultsPath):
dataMembership = None
dataOverlap = None
for k,v in allModeMemberships.items():
print k, v[fromMode:toMode], allModeOverlaps[k][fromMode:toMode]
if dataMembership is None:
dataMembership = np.array(v[fromMode:toMode])
dataOverlap = allModeOverlaps[k][fromMode:toMode]
else:
dataMembership = np.vstack((dataMembership, v[fromMode:toMode]))
dataOverlap = np.vstack((dataOverlap, allModeOverlaps[k][fromMode:toMode]))
print dataMembership
print dataOverlap
dataToPlot = combineMembershipAndOverlap(dataMembership, dataOverlap)
dataToPlot = add1k1kVector(dataToPlot, 1.5, -1.5)
print dataToPlot
column_labels = allModeMemberships.keys()
column_labels.append("1k1k_u")
print column_labels
row_labels = range(fromMode, toMode)
fig, ax = plt.subplots()
p = ax.pcolormesh(dataToPlot)
cbar = fig.colorbar(p)
cbar.set_ticks([-1.5, -0.5, 0.5, 1.5])
cbar.set_ticklabels(['L bad', 'L good', 'R good', 'R bad']) # put text labels on them
    # put the major ticks at the middle of each cell, notice "reverse" use of dimension
ax.set_yticks(np.arange(dataMembership.shape[0])+0.5, minor=False)
ax.set_xticks(np.arange(dataMembership.shape[1])+0.5, minor=False)
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(column_labels, minor=False)
ax.xaxis.tick_top()
#plt.show()
plt.savefig(resultsPath+'/modeSpectrum.eps', bbox_inches='tight')
plt.savefig(resultsPath+'/modeSpectrum.pdf', bbox_inches='tight')
# close and reset the plot
plt.clf()
plt.cla()
plt.close()
def combineMembershipAndOverlap(dataMembership, dataOverlap):
assert dataMembership.shape == dataOverlap.shape
plotData = np.empty([dataMembership.shape[0], dataMembership.shape[1]])
for i in range(0, dataMembership.shape[0]):
for j in range(0, dataMembership.shape[1]):
if dataMembership[i][j] >= 0.0:
plotData[i][j] = dataMembership[i][j] - np.abs(dataOverlap[i][j])
else:
plotData[i][j] = dataMembership[i][j] + np.abs(dataOverlap[i][j])
print dataMembership[i][j], dataOverlap[i][j]
return plotData
def add1k1kVector(plotData, receptorValue, ligandValue):
k1kArray = np.empty(plotData.shape[1])
for i in range(0, plotData.shape[1]):
if i % 2 == 0:
k1kArray[i] = receptorValue
elif i % 2 == 1:
k1kArray[i] = ligandValue
plotData = np.vstack((plotData, k1kArray))
return plotData
def getModeMemberships(modeArray, receptorDim, ligandDim):
""" Returns the mode membership of a HC_0 model
Args:
modeArray: modes from a NMA in ProDy np array format (rows xyz of atoms, colums modes)
receptorDim: dimension of the receptor atoms XYZs (top part of a columvector mode)
ligandDim: dimension of the ligand atoms XYZs (bottom part of a columnvector mode)
Returns:
membership of modeArray (1.0 receptor, 0.0 ligand)
"""
modeMemberships = []
for mode in modeArray.T:
assert np.allclose(mode[:receptorDim], 0.0) or np.allclose(mode[receptorDim:receptorDim+ligandDim], 0.0)
if np.allclose(mode[:receptorDim], 0.0):
modeMemberships.append(-1.5)
else:
modeMemberships.append(1.5)
return modeMemberships
def getModeOverlaps(modeArray, defvec):
""" Returns the mode overals of a modeArray towards a deformation vector
Args:
modeArray: modes from a NMA in ProDy np array format (rows xyz of atoms, colums modes)
defvec: deformation vector
Returns:
list with overlaps of modes towards the deformation vector
"""
modeOverlaps = []
defvecArray = defvec.getArray()
for mode in modeArray.T:
assert mode.shape == defvecArray.shape
modeOverlaps.append(calcOverlap(Vector(mode), defvec))
print len(modeOverlaps)
return modeOverlaps
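def membershipToyExample():
    """ Hedged toy example (added for illustration, not part of the original
    script): getModeMemberships() labels a mode 1.5 when its ligand block is
    all zeros (receptor-only mode) and -1.5 when its receptor block is all
    zeros (ligand-only mode). The two 6-dimensional modes below are made up.
    """
    receptorMode = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    ligandMode = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0])
    toyModes = np.column_stack((receptorMode, ligandMode))
    return getModeMemberships(toyModes, 3, 3)  # [1.5, -1.5]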
def extractNPZs(folders):
""" Extract the NPZs in each folder
Args:
folders: path to the resultsfolders
Result: extracted NPZs in each resultfolder
"""
for resultFolder in folders:
zipFileFullPath = glob.glob(resultFolder+"/*npz.tar.gz")[0]
zipFile = os.path.basename(zipFileFullPath)
extractDir = zipFile[0:zipFile.rfind(".tar")]
runBashCommand("mkdir "+resultFolder+"/"+extractDir)
runBashCommand("tar xfz "+zipFileFullPath+" -C "+resultFolder+"/"+extractDir)
def getResolution(folders):
""" Get resolution (calpha, bb, all) of the experiment. It is assumed for speed purposed that all subfolders have
the same resolution """
for resultFolder in folders:
configFile = glob.glob(resultFolder+"/*.py")[0]
whatAtomsToMatchLine = runBashCommand("grep self.whatAtomsToMatch "+configFile)
return whatAtomsToMatchLine[whatAtomsToMatchLine.find("\"")+1:whatAtomsToMatchLine.rfind("\"")]
def getOverallMatch(reference, mobile, subset):
"""
Performs a matching of chains of the two elements reference and mobile returns
the matches with all atoms as specified by subset.
At first, a modified version of matchChains is called that only uses the
pairwise alignment matching of prody, but keeps the prody defaults of
minimum seqid and overlap/coverage.
- If the matches are a perfect bisection, this result is used
and returned.
- Else, the hungarian algorithm is called to find the optimal
matches, and the result returned.
In case of the hungarian algorithm, the matchChains method has been
    modified with the following addons:
1. pairwise alignment is enforced (from Bio.pairwise2)
2. pairwise alignment is the only matching algorithm, the prody
first choice of mapping based on residue numbers and type is
ignored
3. minimum seqid and overlap criteria are set to 0.00001, the actual
matching decision will be performed by the hungarian algorithm,
and pairwise alignment is only needed for the actual values of
seqid and overlap to create the cost matrix of the hungarian
algorithm
Remarks: prepareForHungarian needs to be set to True. Otherwise, the
ProDy matching sorts matched chains internally in decreasing order
of sequence identity, but this order is sometimes not the order of
chains in the PDB file.
Args:
reference: the unbound structure
mobile: the bound structure
subset: which matched atoms to return (calpha, bb, all ...)
Returns:
the overall match of chains from the given myTuple based on the
Bio.pairwise2 scores and possibly the hungarian algorithm
"""
matches = matchTNMAChains(reference,
mobile,
prepareForHungarian = True,
pwalign="True",
subset=subset)
    # if the numbers of chains do not match, the behavior cannot be
# defined at this point
assert reference.numChains() == mobile.numChains()
if matches is None:
return doHungarianMatching(reference, mobile, subset)
elif not (reference.numChains() == mobile.numChains() == len(matches)):
return doHungarianMatching(reference, mobile, subset)
elif not isAOnetoOneMatch(matches):
return doHungarianMatching(reference, mobile, subset)
else:
# make overall match and return it
noMatchYet = True
for match in matches:
ref_chain = match[0]
mob_chain = match[1]
if noMatchYet:
overallRefMatch = ref_chain
overallMobMatch = mob_chain
noMatchYet = False
else:
overallRefMatch += ref_chain
overallMobMatch += mob_chain
if not noMatchYet:
overallMatch = [overallRefMatch, overallMobMatch]
else:
overallMatch = [ref_chain, mob_chain]
return overallMatch
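def matchingUsageSketch():
    """ Hedged usage sketch (illustration only, not part of the original
    script): the PDB file names below are hypothetical. Returns the matched
    unbound/bound atom groups restricted to C-alpha atoms. """
    unbound = parsePDB('complex_unbound.pdb')
    bound = parsePDB('complex_bound.pdb')
    return getOverallMatch(unbound, bound, 'calpha')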
def isAOnetoOneMatch( matches):
""" Return False if matches does not have a one to one match for each
chain, else return True.
    It is assumed that len(matches) > 1 and that the number of matches
    equals the number of chains in either structure.
    Args:
        matches: matches as returned by matchTNMAChains
    Returns: whether matches contains a perfect 1:1 matching between the two chain sets
"""
baseSetUnbound = set(matches[0][0].getChids())
baseSetBound = set(matches[0][1].getChids())
assert len(baseSetUnbound) == 1, 'assert len(baseSetUnbound) == 1'
assert len(baseSetBound) == 1, 'assert len(baseSetBound) == 1'
for i in range(1, len(matches)):
addonSetUnbound = set(matches[i][0].getChids())
addonSetBound = set(matches[i][1].getChids())
assert len(addonSetUnbound) == 1, 'assert len(addonSetUnbound) == 1'
assert len(addonSetBound) == 1, 'assert len(addonSetBound) == 1'
if len(baseSetUnbound.intersection(addonSetUnbound)) > 0:
return False
elif len(baseSetBound.intersection(addonSetBound)) > 0:
return False
elif len(baseSetUnbound.intersection(addonSetUnbound)) == 0:
baseSetUnbound = baseSetUnbound.union(addonSetUnbound)
elif len(baseSetBound.intersection(addonSetBound)) == 0:
baseSetBound = baseSetBound.union(addonSetBound)
else:
print "**********\n\n\n set problem in isAOnetoOneMatch(...)"
sys.exit()
return True
def doHungarianMatching(reference, mobile, subset):
""" Do a chain matching with the help of the Hungarian Algorithm.
Args:
reference: a structure (for instance protein) to be matched
        mobile: another structure (for instance the same protein in a different conformational state) to be matched
subset: what atoms are considered for this matching (calpha, bb, all)
Returns:
object with the overall chain matchings
"""
print "Performing matching with the help of the Hungarian Algorithm."
seqid = 0.00001
overlap = 0.00001
matches = matchTNMAChains(reference,
mobile,
prepareForHungarian = True,
seqid=seqid,
overlap=overlap,
pwalign="True",
subset=subset)
hungarian = Hungarian()
indices, matchesMatrix = hungarian.getHungarianIndices(
reference.numChains(),
mobile.numChains(),
matches)
noMatchYet = True
for element in indices:
ref_chain = (matchesMatrix[element[0]][element[1]])[0]
mob_chain = (matchesMatrix[element[0]][element[1]])[1]
if noMatchYet:
overallRefMatch = ref_chain
overallMobMatch = mob_chain
noMatchYet = False
else:
overallRefMatch += ref_chain
overallMobMatch += mob_chain
if not noMatchYet:
overallMatch = [overallRefMatch, overallMobMatch]
else:
overallMatch = [ref_chain, mob_chain]
return overallMatch
def getSlicedANM(reference, ref_chain, anm_reference):
""" Get the sliced anm, given an already calculated anm_reference and matched chains """
# Extend the anm_reference on all atoms
anm_reference_extend = extendModel(anm_reference, reference.select('calpha'), reference, norm=True)
# Then slice the anm_reference to the matched
anm_reference_slc = sliceModel(anm_reference_extend[0], anm_reference_extend[1], ref_chain.getSelstr())
# Normalize the slices anm
anm_reference_slc = getNormalizedANM(anm_reference_slc)
return anm_reference_slc
def normalizeM(M):
""" Normalize a set of modes, which are the columnvectors in M.
Args:
M: set of modes as columnvectors
Returns: normalized (magnitude of each mode is 1) set of modes as columnvectors in M
"""
Mnormed = None
if M.ndim == 1:
modeVector = Vector(M)
return modeVector.getNormed().getArray()
else:
for element in M.T:
modeVector = Vector(element)
modeNormalized = modeVector.getNormed()
            if Mnormed is None:
Mnormed = modeNormalized.getArray()
else:
Mnormed = np.column_stack((Mnormed, modeNormalized.getArray()))
return Mnormed
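def normalizeMToyExample():
    """ Hedged toy example (illustration only, not part of the original
    script): every column of the made-up 6x2 matrix is scaled to unit length
    by normalizeM(). """
    M = np.array([[3.0, 0.0],
                  [4.0, 0.0],
                  [0.0, 2.0],
                  [0.0, 0.0],
                  [0.0, 0.0],
                  [0.0, 1.0]])
    Mnormed = normalizeM(M)
    assert np.allclose(np.linalg.norm(Mnormed, axis=0), 1.0)
    return Mnormed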
def getNormalizedANM(anm):
""" Normalize the modes of the anm and return this anm object
Args:
anm: the anm with modes calculated
Returns: anm with normalized modes
"""
M = normalizeM(anm[0].getArray())
eigenvals = anm[0].getEigvals()
anm[0].setEigens(M, eigenvals)
return anm
if __name__ == '__main__':
main() | mit |
jungla/ICOM-fluidity-toolbox | 2D/U/plot_W_t_2.py | 1 | 3591 | import os, sys
import vtktools
import fluidity_tools
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
#label = sys.argv[1]
#basename = sys.argv[2]
path = '../RST/stat_files/'
file1 = 'm_25_1b.stat'
filepath1 = path+file1
stat1 = fluidity_tools.stat_parser(filepath1)
file1b = 'm_25_1b_checkpoint.stat'
filepath1b = path+file1b
stat1b = fluidity_tools.stat_parser(filepath1b)
#file1c = 'm_25_1b_checkpoint_2.stat'
#filepath1c = path+file1c
#stat1c = fluidity_tools.stat_parser(filepath1c)
file2 = 'm_25_2b.stat'
filepath2 = path+file2
stat2 = fluidity_tools.stat_parser(filepath2)
file2b = 'm_25_2b_checkpoint.stat'
filepath2b = path+file2b
stat2b = fluidity_tools.stat_parser(filepath2b)
file2c = 'm_25_2b_checkpoint_2.stat'
filepath2c = path+file2c
stat2c = fluidity_tools.stat_parser(filepath2c)
#file2d = 'm_25_2_512_checkpoint_2.stat'
#filepath2d = path+file2d
#stat2d = fluidity_tools.stat_parser(filepath2d)
time1 = stat1["ElapsedTime"]["value"]/86400.0
time1b = stat1b["ElapsedTime"]["value"]/86400.0
#time1c = stat1c["ElapsedTime"]["value"]/86400.0
time2 = stat2["ElapsedTime"]["value"]/86400.0
time2b = stat2b["ElapsedTime"]["value"]/86400.0
time2c = stat2c["ElapsedTime"]["value"]/86400.0
#time2d = stat2d["ElapsedTime"]["value"]/86400.0
Vel1 = stat1["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]
Vel1b = stat1b["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]
#Vel1c = stat1c["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]
Vel2 = stat2["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]
Vel2b = stat2b["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]
Vel2c = stat2c["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]
#Vel2d = stat2d["BoussinesqFluid"]["Velocity_CG%3"]["l2norm"]
Vel1 = Vel1[np.where(time1<time1b[0])]
time1 = time1[np.where(time1<time1b[0])]
Vel1 = np.hstack((Vel1,Vel1b))
time1 = np.hstack((time1,time1b))
#Vel1 = Vel1[np.where(time1<time1c[0])]
#time1 = time1[np.where(time1<time1c[0])]
#Vel1 = np.hstack((Vel1,Vel1c))
#time1 = np.hstack((time1,time1c))
Vel2 = Vel2[np.where(time2<time2b[0])]
time2 = time2[np.where(time2<time2b[0])]
Vel2 = np.hstack((Vel2,Vel2b))
time2 = np.hstack((time2,time2b))
Vel2 = Vel2[np.where(time2<time2c[0])]
time2 = time2[np.where(time2<time2c[0])]
Vel2 = np.hstack((Vel2,Vel2c))
time2 = np.hstack((time2,time2c))
#Vel2 = Vel2[np.where(time2<time2d[0])]
#time2 = time2[np.where(time2<time2d[0])]
#Vel2 = np.hstack((Vel2,Vel2d))
#time2 = np.hstack((time2,time2d))
dayf = np.min((len(time1),len(time2)))
Vel1a = Vel1#[:dayf]
Vel2a = Vel2#[:dayf]
T1 = len(time1)
T2 = len(time2)
# volume
V1 = 4883616 #8000.0*8000.0*50 #270030 #135015 #1120*244 #*10000.0*4000.0*50
V2 = 4883616 #8000.0*8000.0*50 #168660 #84330 #1280*240 #*8000.0*8000.0*50
# plot KE
fig = plt.figure(figsize=(6,3))
#T10, = plt.plot(time0[np.where(time0<=4)], Vel0a[np.where(time0<=4)]/V, 'r-',linewidth=1.5)
TB25, = plt.plot(time1[np.where(time1<=9)], Vel1a[np.where(time1<=9)]/V1, 'r',linewidth=1.5)
TBW25, = plt.plot(time2[np.where(time2<=9)], Vel2a[np.where(time2<=9)]/V2, 'b',linewidth=1.5)
#plt.ylim([0.0014, 0.00142])
plt.xlim([0, 13])
plt.xticks(np.linspace(0,9,10),np.linspace(0,9,10).astype(int))
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.plot([3,3],[0,6e-9],'k--')
#plt.xlim([0.920, 0.980])
plt.legend([TB25,TBW25],['$B$','$BW$'])
#plt.legend([TB25],['$B25_m$'])
#plt.plot(time1, KE1/V, 'k',linewidth=1.5)
plt.xlabel("Time $[days]$",fontsize=18)
plt.ylabel("mean |W| $[ms^{-1}]$",fontsize=18)
plt.tight_layout()
plt.savefig('./plot/W_t.eps')
plt.close()
print 'saved '+'./plot/W_t.eps'
| gpl-2.0 |
tillahoffmann/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
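def _pandas_input_fn_example():
  """Hedged usage sketch (not part of the original module): builds an
  input_fn from a made-up DataFrame. Requires pandas to be installed, and
  calling the returned input_fn only makes sense inside a TensorFlow graph.
  """
  x = pd.DataFrame({'age': [21, 35, 48], 'height': [1.6, 1.8, 1.7]})
  y = pd.Series([0, 1, 1])
  input_fn = pandas_input_fn(x, y, batch_size=2, shuffle=False, num_epochs=1)
  return input_fn()  # (features dict, target tensor)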
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
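def _extract_pandas_data_example():
  """Hedged toy example (illustration only): int and bool columns of a
  made-up DataFrame are cast to a float ndarray by extract_pandas_data."""
  frame = pd.DataFrame({'a': [1, 2], 'b': [True, False]})
  return extract_pandas_data(frame)  # array([[1., 1.], [2., 0.]])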
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
imaculate/scikit-learn | sklearn/datasets/svmlight_format.py | 19 | 16759 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
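def _svmlight_roundtrip_example():
    """Hedged round-trip sketch (not part of the original module): dumps a
    tiny made-up dense dataset to an in-memory buffer and loads it back.
    The values are arbitrary and only serve as an illustration."""
    from io import BytesIO
    X = np.array([[0.0, 2.0], [3.0, 0.0]])
    y = np.array([1, 2])
    buf = BytesIO()
    dump_svmlight_file(X, y, buf, zero_based=True)
    buf.seek(0)
    X_loaded, y_loaded = load_svmlight_file(buf, n_features=2, zero_based=True)
    assert np.allclose(X_loaded.toarray(), X) and np.array_equal(y_loaded, y)
    return X_loaded, y_loaded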
| bsd-3-clause |